Diffstat (limited to 'drivers')
862 files changed, 56610 insertions, 3958 deletions
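One change that recurs throughout the block drivers in this diff (loop, null_blk, nvme, virtio_blk, xen-blkfront) is that blk_mq_complete_request() now takes the I/O result as a second argument, so drivers no longer stash it in req->errors before completing the request. A minimal sketch of the new calling convention follows; the helper name example_complete() and the blanket -EIO mapping are illustrative only, not taken from any driver below:

	#include <linux/blk-mq.h>	/* struct request, blk_mq_complete_request() */

	/*
	 * Hypothetical completion path showing the two-argument convention
	 * introduced by this series.  Previously a driver would do
	 * "rq->errors = ret ? -EIO : 0; blk_mq_complete_request(rq);";
	 * now the error is passed directly to the completion call.
	 */
	static void example_complete(struct request *rq, int ret)
	{
		blk_mq_complete_request(rq, ret ? -EIO : 0);
	}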
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 09f37b516808..4dde37c3d8fc 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -61,6 +61,7 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header); ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX); +ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX); #if (!ACPI_REDUCED_HARDWARE) ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS); diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index f7731f260c31..591ea95319e2 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -85,7 +85,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded); /* * tbfadt - FADT parse/convert/validate */ -void acpi_tb_parse_fadt(u32 table_index); +void acpi_tb_parse_fadt(void); void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length); @@ -138,8 +138,6 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id); */ acpi_status acpi_tb_initialize_facs(void); -u8 acpi_tb_tables_loaded(void); - void acpi_tb_print_table_header(acpi_physical_address address, struct acpi_table_header *header); diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index faad911d46b5..10ce48e16ebf 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -71,7 +71,7 @@ acpi_status acpi_enable(void) /* ACPI tables must be present */ - if (!acpi_tb_tables_loaded()) { + if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) { return_ACPI_STATUS(AE_NO_ACPI_TABLES); } diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 455a0700db39..a6454f4a6fb3 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -298,7 +298,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64) * * FUNCTION: acpi_tb_parse_fadt * - * PARAMETERS: table_index - Index for the FADT + * PARAMETERS: None * * RETURN: None * @@ -307,7 +307,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64) * ******************************************************************************/ -void acpi_tb_parse_fadt(u32 table_index) +void acpi_tb_parse_fadt(void) { u32 length; struct acpi_table_header *table; @@ -319,11 +319,11 @@ void acpi_tb_parse_fadt(u32 table_index) * Get a local copy of the FADT and convert it to a common format * Map entire FADT, assumed to be smaller than one page. */ - length = acpi_gbl_root_table_list.tables[table_index].length; + length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length; table = - acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index]. - address, length); + acpi_os_map_memory(acpi_gbl_root_table_list. 
+ tables[acpi_gbl_fadt_index].address, length); if (!table) { return; } diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 4337990127cc..d8ddef38c947 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -99,29 +99,6 @@ acpi_status acpi_tb_initialize_facs(void) /******************************************************************************* * - * FUNCTION: acpi_tb_tables_loaded - * - * PARAMETERS: None - * - * RETURN: TRUE if required ACPI tables are loaded - * - * DESCRIPTION: Determine if the minimum required ACPI tables are present - * (FADT, FACS, DSDT) - * - ******************************************************************************/ - -u8 acpi_tb_tables_loaded(void) -{ - - if (acpi_gbl_root_table_list.current_table_count >= 4) { - return (TRUE); - } - - return (FALSE); -} - -/******************************************************************************* - * * FUNCTION: acpi_tb_check_dsdt_header * * PARAMETERS: None @@ -392,7 +369,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) ACPI_COMPARE_NAME(&acpi_gbl_root_table_list. tables[table_index].signature, ACPI_SIG_FADT)) { - acpi_tb_parse_fadt(table_index); + acpi_gbl_fadt_index = table_index; + acpi_tb_parse_fadt(); } next_table: diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 2a4154a09e4d..85e17bacc834 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -77,13 +77,16 @@ static bool default_stop_ok(struct device *dev) dev_update_qos_constraint); if (constraint_ns > 0) { - constraint_ns -= td->start_latency_ns; + constraint_ns -= td->save_state_latency_ns + + td->stop_latency_ns + + td->start_latency_ns + + td->restore_state_latency_ns; if (constraint_ns == 0) return false; } td->effective_constraint_ns = constraint_ns; - td->cached_stop_ok = constraint_ns > td->stop_latency_ns || - constraint_ns == 0; + td->cached_stop_ok = constraint_ns >= 0; + /* * The children have been suspended already, so we don't need to take * their stop latencies into account here. @@ -126,18 +129,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) off_on_time_ns = genpd->power_off_latency_ns + genpd->power_on_latency_ns; - /* - * It doesn't make sense to remove power from the domain if saving - * the state of all devices in it and the power off/power on operations - * take too much time. - * - * All devices in this domain have been stopped already at this point. - */ - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - if (pdd->dev->driver) - off_on_time_ns += - to_gpd_data(pdd)->td.save_state_latency_ns; - } min_off_time_ns = -1; /* @@ -193,7 +184,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) * constraint_ns cannot be negative here, because the device has * been suspended. 
*/ - constraint_ns -= td->restore_state_latency_ns; if (constraint_ns <= off_on_time_ns) return false; diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index f42f2bac6466..4c55cfbad19e 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock); /* Calculate the length of a fixed format */ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) { - snprintf(buf, buf_size, "%x", max_val); - return strlen(buf); + return snprintf(NULL, 0, "%x", max_val); } static ssize_t regmap_name_read_file(struct file *file, @@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file, /* If we're in the region the user is trying to read */ if (p >= *ppos) { /* ...but not beyond it */ - if (buf_pos >= count - 1 - tot_len) + if (buf_pos + tot_len + 1 >= count) break; /* Format the register */ diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f9889b6bc02c..674f800a3b57 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd) { const bool write = cmd->rq->cmd_flags & REQ_WRITE; struct loop_device *lo = cmd->rq->q->queuedata; - int ret = -EIO; + int ret = 0; - if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) + if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { + ret = -EIO; goto failed; + } ret = do_req_filebacked(lo, cmd->rq); - failed: - if (ret) - cmd->rq->errors = -EIO; - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, ret ? -EIO : 0); } static void loop_queue_write_work(struct work_struct *work) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index a295b98c6bae..1c9e4fe5aa44 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (queue_mode) { case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, cmd->rq->errors); break; case NULL_Q_RQ: blk_complete_request(cmd->rq); diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b97fc3fe0916..6f04771f1019 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, spin_unlock_irqrestore(req->q->queue_lock, flags); return; } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { if (cmd_rq->ctx == CMD_CTX_CANCELLED) - req->errors = -EINTR; - else - req->errors = status; + status = -EINTR; } else { - req->errors = nvme_error_status(status); + status = nvme_error_status(status); } - } else - req->errors = 0; + } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { u32 result = le32_to_cpup(&cqe->result); req->special = (void *)(uintptr_t)result; @@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, } nvme_free_iod(nvmeq->dev, iod); - blk_mq_complete_request(req); + blk_mq_complete_request(req, status); } /* length is in bytes. gfp flags indicates whether we may sleep. 
*/ @@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (ns && ns->ms && !blk_integrity_rq(req)) { if (!(ns->pi_type && ns->ms == 8) && req->cmd_type != REQ_TYPE_DRV_PRIV) { - req->errors = -EFAULT; - blk_mq_complete_request(req); + blk_mq_complete_request(req, -EFAULT); return BLK_MQ_RQ_QUEUE_OK; } } @@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn) list_sort(NULL, &dev->namespaces, ns_cmp); } +static void nvme_set_irq_hints(struct nvme_dev *dev) +{ + struct nvme_queue *nvmeq; + int i; + + for (i = 0; i < dev->online_queues; i++) { + nvmeq = dev->queues[i]; + + if (!nvmeq->tags || !(*nvmeq->tags)) + continue; + + irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, + blk_mq_tags_cpumask(*nvmeq->tags)); + } +} + static void nvme_dev_scan(struct work_struct *work) { struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); @@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work) return; nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); kfree(ctrl); + nvme_set_irq_hints(dev); } /* @@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = { .compat_ioctl = nvme_dev_ioctl, }; -static void nvme_set_irq_hints(struct nvme_dev *dev) -{ - struct nvme_queue *nvmeq; - int i; - - for (i = 0; i < dev->online_queues; i++) { - nvmeq = dev->queues[i]; - - if (!nvmeq->tags || !(*nvmeq->tags)) - continue; - - irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, - blk_mq_tags_cpumask(*nvmeq->tags)); - } -} - static int nvme_dev_start(struct nvme_dev *dev) { int result; @@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev) if (result) goto free_tags; - nvme_set_irq_hints(dev); - dev->event_limit = 1; return result; @@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev) } else { nvme_unfreeze_queues(dev); nvme_dev_add(dev); - nvme_set_irq_hints(dev); } return 0; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d93a0372b37b..f5e49b639818 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1863,9 +1863,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_osd_read_callback(obj_request); break; case CEPH_OSD_OP_SETALLOCHINT: - rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE); + rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE || + osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL); /* fall through */ case CEPH_OSD_OP_WRITE: + case CEPH_OSD_OP_WRITEFULL: rbd_osd_write_callback(obj_request); break; case CEPH_OSD_OP_STAT: @@ -2401,7 +2403,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request, opcode = CEPH_OSD_OP_ZERO; } } else if (op_type == OBJ_OP_WRITE) { - opcode = CEPH_OSD_OP_WRITE; + if (!offset && length == object_size) + opcode = CEPH_OSD_OP_WRITEFULL; + else + opcode = CEPH_OSD_OP_WRITE; osd_req_op_alloc_hint_init(osd_request, num_ops, object_size, object_size); num_ops++; @@ -3760,6 +3765,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) /* set io sizes to object size */ segment_size = rbd_obj_bytes(&rbd_dev->header); blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); + q->limits.max_sectors = queue_max_hw_sectors(q); blk_queue_max_segments(q, segment_size / SECTOR_SIZE); blk_queue_max_segment_size(q, segment_size); blk_queue_io_min(q, segment_size); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index e93899cc6f60..6ca35495a5be 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -144,7 +144,7 @@ static void 
virtblk_done(struct virtqueue *vq) do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { - blk_mq_complete_request(vbr->req); + blk_mq_complete_request(vbr->req, vbr->req->errors); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index deb3f001791f..767657565de6 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref, static int xen_blkif_disconnect(struct xen_blkif *blkif) { + struct pending_req *req, *n; + int i = 0, j; + if (blkif->xenblkd) { kthread_stop(blkif->xenblkd); wake_up(&blkif->shutdown_wq); @@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) /* Remove all persistent grants and the cache of ballooned pages. */ xen_blkbk_free_caches(blkif); + /* Check that there is no request in use */ + list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { + list_del(&req->free_list); + + for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) + kfree(req->segments[j]); + + for (j = 0; j < MAX_INDIRECT_PAGES; j++) + kfree(req->indirect_pages[j]); + + kfree(req); + i++; + } + + WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); + blkif->nr_ring_pages = 0; + return 0; } static void xen_blkif_free(struct xen_blkif *blkif) { - struct pending_req *req, *n; - int i = 0, j; xen_blkif_disconnect(blkif); xen_vbd_free(&blkif->vbd); @@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif) BUG_ON(!list_empty(&blkif->free_pages)); BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); - /* Check that there is no request in use */ - list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { - list_del(&req->free_list); - - for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) - kfree(req->segments[j]); - - for (j = 0; j < MAX_INDIRECT_PAGES; j++) - kfree(req->indirect_pages[j]); - - kfree(req); - i++; - } - - WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); - kmem_cache_free(xen_blkif_cachep, blkif); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 0823a96902f8..611170896b8c 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; + int error; spin_lock_irqsave(&info->io_lock, flags); @@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) continue; } - req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; + error = (bret->status == BLKIF_RSP_OKAY) ? 
0 : -EIO; switch (bret->operation) { case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } - blk_mq_complete_request(req); + blk_mq_complete_request(req, error); break; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; } if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.u.rw.nr_segments == 0)) { printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; } - if (unlikely(req->errors)) { - if (req->errors == -EOPNOTSUPP) - req->errors = 0; + if (unlikely(error)) { + if (error == -EOPNOTSUPP) + error = 0; info->feature_flush = 0; xlvbd_flush(info); } @@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " "request: %x\n", bret->status); - blk_mq_complete_request(req); + blk_mq_complete_request(req, error); break; default: BUG(); diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index c9c5dd0bad36..ec6af1595062 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -183,7 +183,7 @@ config BT_HCIBCM203X config BT_HCIBPA10X tristate "HCI BPA10x USB driver" - depends on USB + depends on USB && BT_HCIUART select BT_HCIUART_H4 help Bluetooth HCI BPA10x USB driver. 
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index e527a3e13939..fa893c3ec408 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -93,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x04CA, 0x300f) }, { USB_DEVICE(0x04CA, 0x3010) }, { USB_DEVICE(0x0930, 0x0219) }, + { USB_DEVICE(0x0930, 0x021c) }, { USB_DEVICE(0x0930, 0x0220) }, { USB_DEVICE(0x0930, 0x0227) }, { USB_DEVICE(0x0b05, 0x17d0) }, @@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x0CF3, 0x311F) }, { USB_DEVICE(0x0cf3, 0x3121) }, { USB_DEVICE(0x0CF3, 0x817a) }, + { USB_DEVICE(0x0CF3, 0x817b) }, { USB_DEVICE(0x0cf3, 0xe003) }, { USB_DEVICE(0x0CF3, 0xE004) }, { USB_DEVICE(0x0CF3, 0xE005) }, @@ -153,6 +155,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, @@ -164,6 +167,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 2fc363a0393d..0b697946e9bc 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -323,7 +323,7 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len) } BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name, - hw_name ? : "BCM", (subver & 0x7000) >> 13, + hw_name ? : "BCM", (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); return 0; @@ -353,7 +353,7 @@ int btbcm_finalize(struct hci_dev *hdev) kfree_skb(skb); BT_INFO("%s: BCM (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name, - (subver & 0x7000) >> 13, (subver & 0x1f00) >> 8, + (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); btbcm_check_bdaddr(hdev); @@ -461,7 +461,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev) } BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name, - hw_name ? : "BCM", (subver & 0x7000) >> 13, + hw_name ? : "BCM", (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); err = request_firmware(&fw, fw_name, &hdev->dev); @@ -490,7 +490,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev) kfree_skb(skb); BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name, - hw_name ? : "BCM", (subver & 0x7000) >> 13, + hw_name ? 
: "BCM", (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); /* Read Local Name */ @@ -527,6 +527,15 @@ int btbcm_setup_apple(struct hci_dev *hdev) kfree_skb(skb); } + /* Read USB Product Info */ + skb = btbcm_read_usb_product(hdev); + if (!IS_ERR(skb)) { + BT_INFO("%s: BCM: product %4.4x:%4.4x", hdev->name, + get_unaligned_le16(skb->data + 1), + get_unaligned_le16(skb->data + 3)); + kfree_skb(skb); + } + /* Read Local Name */ skb = btbcm_read_local_name(hdev); if (!IS_ERR(skb)) { diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 7047fe6a6a2b..1f13e617bf56 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -91,6 +91,75 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) } EXPORT_SYMBOL_GPL(btintel_set_bdaddr); +int btintel_set_diag(struct hci_dev *hdev, bool enable) +{ + struct sk_buff *skb; + u8 param[3]; + int err; + + if (enable) { + param[0] = 0x03; + param[1] = 0x03; + param[2] = 0x03; + } else { + param[0] = 0x00; + param[1] = 0x00; + param[2] = 0x00; + } + + skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + if (err == -ENODATA) + goto done; + BT_ERR("%s: Changing Intel diagnostic mode failed (%d)", + hdev->name, err); + return err; + } + kfree_skb(skb); + +done: + btintel_set_event_mask(hdev, enable); + return 0; +} +EXPORT_SYMBOL_GPL(btintel_set_diag); + +int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable) +{ + struct sk_buff *skb; + u8 param[2]; + int err; + + param[0] = 0x01; + param[1] = 0x00; + + skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Entering Intel manufacturer mode failed (%d)", + hdev->name, err); + return PTR_ERR(skb); + } + kfree_skb(skb); + + err = btintel_set_diag(hdev, enable); + + param[0] = 0x00; + param[1] = 0x00; + + skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)", + hdev->name, err); + return PTR_ERR(skb); + } + kfree_skb(skb); + + return err; +} +EXPORT_SYMBOL_GPL(btintel_set_diag_mfg); + void btintel_hw_error(struct hci_dev *hdev, u8 code) { struct sk_buff *skb; @@ -216,6 +285,64 @@ int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name) } EXPORT_SYMBOL_GPL(btintel_load_ddc_config); +int btintel_set_event_mask(struct hci_dev *hdev, bool debug) +{ + u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + struct sk_buff *skb; + int err; + + if (debug) + mask[1] |= 0x62; + + skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Setting Intel event mask failed (%d)", + hdev->name, err); + return err; + } + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_set_event_mask); + +int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug) +{ + struct sk_buff *skb; + u8 param[2]; + int err; + + param[0] = 0x01; + param[1] = 0x00; + + skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Entering Intel manufacturer mode failed (%d)", + hdev->name, err); + return PTR_ERR(skb); + } + kfree_skb(skb); + + err = btintel_set_event_mask(hdev, debug); + + param[0] = 0x00; + param[1] = 0x00; + + skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Leaving Intel manufacturer mode 
failed (%d)", + hdev->name, err); + return PTR_ERR(skb); + } + kfree_skb(skb); + + return err; +} +EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg); + /* ------- REGMAP IBT SUPPORT ------- */ #define IBT_REG_MODE_8BIT 0x00 diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index f0655c476fd2..07e58e05a7fa 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -73,12 +73,16 @@ struct intel_secure_send_result { int btintel_check_bdaddr(struct hci_dev *hdev); int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); +int btintel_set_diag(struct hci_dev *hdev, bool enable); +int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable); void btintel_hw_error(struct hci_dev *hdev, u8 code); void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver); int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen, const void *param); int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name); +int btintel_set_event_mask(struct hci_dev *hdev, bool debug); +int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug); struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read, u16 opcode_write); @@ -95,6 +99,16 @@ static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdadd return -EOPNOTSUPP; } +static inline int btintel_set_diag(struct hci_dev *hdev, bool enable) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable) +{ + return -EOPNOTSUPP; +} + static inline void btintel_hw_error(struct hci_dev *hdev, u8 code) { } @@ -116,6 +130,16 @@ static inline int btintel_load_ddc_config(struct hci_dev *hdev, return -EOPNOTSUPP; } +static inline int btintel_set_event_mask(struct hci_dev *hdev, bool debug) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug) +{ + return -EOPNOTSUPP; +} + static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read, u16 opcode_write) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 247b1062cb9a..e33dacf5bd98 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -60,6 +60,8 @@ static struct usb_driver btusb_driver; #define BTUSB_QCA_ROME 0x8000 #define BTUSB_BCM_APPLE 0x10000 #define BTUSB_REALTEK 0x20000 +#define BTUSB_BCM2045 0x40000 +#define BTUSB_IFNUM_2 0x80000 static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ @@ -73,7 +75,7 @@ static const struct usb_device_id btusb_table[] = { /* Apple-specific (Broadcom) devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01), - .driver_info = BTUSB_BCM_APPLE }, + .driver_info = BTUSB_BCM_APPLE | BTUSB_IFNUM_2 }, /* MediaTek MT76x0E */ { USB_DEVICE(0x0e8d, 0x763f) }, @@ -124,6 +126,9 @@ static const struct usb_device_id btusb_table[] = { /* Broadcom BCM20702B0 (Dynex/Insignia) */ { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM }, + /* Broadcom BCM43142A0 (Foxconn/Lenovo) */ + { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM }, + /* Foxconn - Hon Hai */ { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, @@ -164,6 +169,9 @@ static const struct usb_device_id blacklist_table[] = { /* Broadcom BCM2033 without firmware */ { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, + /* Broadcom BCM2045 devices */ + { USB_DEVICE(0x0a5c, 0x2045), .driver_info = BTUSB_BCM2045 }, + /* Atheros 3011 with sflash firmware */ 
{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, @@ -195,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, @@ -206,6 +215,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x817b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, @@ -341,12 +351,14 @@ static const struct usb_device_id blacklist_table[] = { #define BTUSB_FIRMWARE_FAILED 8 #define BTUSB_BOOTING 9 #define BTUSB_RESET_RESUME 10 +#define BTUSB_DIAG_RUNNING 11 struct btusb_data { struct hci_dev *hdev; struct usb_device *udev; struct usb_interface *intf; struct usb_interface *isoc; + struct usb_interface *diag; unsigned long flags; @@ -361,6 +373,7 @@ struct btusb_data { struct usb_anchor intr_anchor; struct usb_anchor bulk_anchor; struct usb_anchor isoc_anchor; + struct usb_anchor diag_anchor; spinlock_t rxlock; struct sk_buff *evt_skb; @@ -372,6 +385,8 @@ struct btusb_data { struct usb_endpoint_descriptor *bulk_rx_ep; struct usb_endpoint_descriptor *isoc_tx_ep; struct usb_endpoint_descriptor *isoc_rx_ep; + struct usb_endpoint_descriptor *diag_tx_ep; + struct usb_endpoint_descriptor *diag_rx_ep; __u8 cmdreq_type; __u8 cmdreq; @@ -869,6 +884,92 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) return err; } +static void btusb_diag_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btusb_data *data = hci_get_drvdata(hdev); + int err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); + + if (urb->status == 0) { + struct sk_buff *skb; + + skb = bt_skb_alloc(urb->actual_length, GFP_ATOMIC); + if (skb) { + memcpy(skb_put(skb, urb->actual_length), + urb->transfer_buffer, urb->actual_length); + hci_recv_diag(hdev, skb); + } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; + } + + if (!test_bit(BTUSB_DIAG_RUNNING, &data->flags)) + return; + + usb_anchor_urb(urb, &data->diag_anchor); + usb_mark_last_busy(data->udev); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + /* -EPERM: urb is being killed; + * -ENODEV: device got disconnected */ + if (err != -EPERM && err != -ENODEV) + BT_ERR("%s urb %p failed to resubmit (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } +} + +static int btusb_submit_diag_urb(struct hci_dev *hdev, gfp_t mem_flags) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size = HCI_MAX_FRAME_SIZE; + + BT_DBG("%s", hdev->name); + + if (!data->diag_rx_ep) + return -ENODEV; + + urb = usb_alloc_urb(0, mem_flags); + if (!urb) + return -ENOMEM; + + buf = kmalloc(size, mem_flags); + if 
(!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvbulkpipe(data->udev, data->diag_rx_ep->bEndpointAddress); + + usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, + btusb_diag_complete, hdev); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_mark_last_busy(data->udev); + usb_anchor_urb(urb, &data->diag_anchor); + + err = usb_submit_urb(urb, mem_flags); + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + BT_ERR("%s urb %p submission failed (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + static void btusb_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; @@ -956,6 +1057,11 @@ static int btusb_open(struct hci_dev *hdev) set_bit(BTUSB_BULK_RUNNING, &data->flags); btusb_submit_bulk_urb(hdev, GFP_KERNEL); + if (data->diag) { + if (!btusb_submit_diag_urb(hdev, GFP_KERNEL)) + set_bit(BTUSB_DIAG_RUNNING, &data->flags); + } + done: usb_autopm_put_interface(data->intf); return 0; @@ -971,6 +1077,7 @@ static void btusb_stop_traffic(struct btusb_data *data) usb_kill_anchored_urbs(&data->intr_anchor); usb_kill_anchored_urbs(&data->bulk_anchor); usb_kill_anchored_urbs(&data->isoc_anchor); + usb_kill_anchored_urbs(&data->diag_anchor); } static int btusb_close(struct hci_dev *hdev) @@ -986,6 +1093,7 @@ static int btusb_close(struct hci_dev *hdev) clear_bit(BTUSB_ISOC_RUNNING, &data->flags); clear_bit(BTUSB_BULK_RUNNING, &data->flags); clear_bit(BTUSB_INTR_RUNNING, &data->flags); + clear_bit(BTUSB_DIAG_RUNNING, &data->flags); btusb_stop_traffic(data); btusb_free_frags(data); @@ -1593,8 +1701,7 @@ static int btusb_setup_intel(struct hci_dev *hdev) BT_INFO("%s: Intel device is already patched. patch num: %02x", hdev->name, ver->fw_patch_num); kfree_skb(skb); - btintel_check_bdaddr(hdev); - return 0; + goto complete; } /* Opens the firmware patch file based on the firmware version read @@ -1606,8 +1713,7 @@ static int btusb_setup_intel(struct hci_dev *hdev) fw = btusb_setup_intel_get_fw(hdev, ver); if (!fw) { kfree_skb(skb); - btintel_check_bdaddr(hdev); - return 0; + goto complete; } fw_ptr = fw->data; @@ -1680,8 +1786,7 @@ static int btusb_setup_intel(struct hci_dev *hdev) BT_INFO("%s: Intel Bluetooth firmware patch completed and activated", hdev->name); - btintel_check_bdaddr(hdev); - return 0; + goto complete; exit_mfg_disable: /* Disable the manufacturer mode without reset */ @@ -1696,8 +1801,7 @@ exit_mfg_disable: BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name); - btintel_check_bdaddr(hdev); - return 0; + goto complete; exit_mfg_deactivate: release_firmware(fw); @@ -1717,6 +1821,12 @@ exit_mfg_deactivate: BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated", hdev->name); +complete: + /* Set the event mask for Intel specific vendor events. This enables + * a few extra events that are useful during general operation. + */ + btintel_set_event_mask_mfg(hdev, false); + btintel_check_bdaddr(hdev); return 0; } @@ -2006,6 +2116,15 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) BT_INFO("%s: Secure boot is %s", hdev->name, params->secure_boot ? "enabled" : "disabled"); + BT_INFO("%s: OTP lock is %s", hdev->name, + params->otp_lock ? "enabled" : "disabled"); + + BT_INFO("%s: API lock is %s", hdev->name, + params->api_lock ? "enabled" : "disabled"); + + BT_INFO("%s: Debug lock is %s", hdev->name, + params->debug_lock ? 
"enabled" : "disabled"); + BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name, params->min_fw_build_nn, params->min_fw_build_cw, 2000 + params->min_fw_build_yy); @@ -2222,6 +2341,15 @@ done: */ btintel_load_ddc_config(hdev, fwname); + /* Set the event mask for Intel specific vendor events. This enables + * a few extra events that are useful during general operation. It + * does not enable any debugging related events. + * + * The device will function correctly without these events enabled + * and thus no need to fail the setup. + */ + btintel_set_event_mask(hdev, false); + return 0; } @@ -2547,19 +2675,115 @@ static int btusb_setup_qca(struct hci_dev *hdev) return 0; } +#ifdef CONFIG_BT_HCIBTUSB_BCM +static inline int __set_diag_interface(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct usb_interface *intf = data->diag; + int i; + + if (!data->diag) + return -ENODEV; + + data->diag_tx_ep = NULL; + data->diag_rx_ep = NULL; + + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { + struct usb_endpoint_descriptor *ep_desc; + + ep_desc = &intf->cur_altsetting->endpoint[i].desc; + + if (!data->diag_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { + data->diag_tx_ep = ep_desc; + continue; + } + + if (!data->diag_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { + data->diag_rx_ep = ep_desc; + continue; + } + } + + if (!data->diag_tx_ep || !data->diag_rx_ep) { + BT_ERR("%s invalid diagnostic descriptors", hdev->name); + return -ENODEV; + } + + return 0; +} + +static struct urb *alloc_diag_urb(struct hci_dev *hdev, bool enable) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct sk_buff *skb; + struct urb *urb; + unsigned int pipe; + + if (!data->diag_tx_ep) + return ERR_PTR(-ENODEV); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return ERR_PTR(-ENOMEM); + + skb = bt_skb_alloc(2, GFP_KERNEL); + if (!skb) { + usb_free_urb(urb); + return ERR_PTR(-ENOMEM); + } + + *skb_put(skb, 1) = 0xf0; + *skb_put(skb, 1) = enable; + + pipe = usb_sndbulkpipe(data->udev, data->diag_tx_ep->bEndpointAddress); + + usb_fill_bulk_urb(urb, data->udev, pipe, + skb->data, skb->len, btusb_tx_complete, skb); + + skb->dev = (void *)hdev; + + return urb; +} + +static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + + if (!data->diag) + return -ENODEV; + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -ENETDOWN; + + urb = alloc_diag_urb(hdev, enable); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + return submit_or_queue_tx_urb(hdev, urb); +} +#endif + static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_endpoint_descriptor *ep_desc; struct btusb_data *data; struct hci_dev *hdev; + unsigned ifnum_base; int i, err; BT_DBG("intf %p id %p", intf, id); /* interface numbers are hardcoded in the spec */ - if (intf->cur_altsetting->desc.bInterfaceNumber != 0) - return -ENODEV; + if (intf->cur_altsetting->desc.bInterfaceNumber != 0) { + if (!(id->driver_info & BTUSB_IFNUM_2)) + return -ENODEV; + if (intf->cur_altsetting->desc.bInterfaceNumber != 2) + return -ENODEV; + } + + ifnum_base = intf->cur_altsetting->desc.bInterfaceNumber; if (!id->driver_info) { const struct usb_device_id *match; @@ -2627,6 +2851,7 @@ static int btusb_probe(struct usb_interface *intf, init_usb_anchor(&data->intr_anchor); init_usb_anchor(&data->bulk_anchor); init_usb_anchor(&data->isoc_anchor); + init_usb_anchor(&data->diag_anchor); spin_lock_init(&data->rxlock); 
if (id->driver_info & BTUSB_INTEL_NEW) { @@ -2660,33 +2885,53 @@ static int btusb_probe(struct usb_interface *intf, hdev->send = btusb_send_frame; hdev->notify = btusb_notify; + if (id->driver_info & BTUSB_BCM2045) + set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); + if (id->driver_info & BTUSB_BCM92035) hdev->setup = btusb_setup_bcm92035; #ifdef CONFIG_BT_HCIBTUSB_BCM if (id->driver_info & BTUSB_BCM_PATCHRAM) { + hdev->manufacturer = 15; hdev->setup = btbcm_setup_patchram; + hdev->set_diag = btusb_bcm_set_diag; hdev->set_bdaddr = btbcm_set_bdaddr; + + /* Broadcom LM_DIAG Interface numbers are hardcoded */ + data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2); } - if (id->driver_info & BTUSB_BCM_APPLE) + if (id->driver_info & BTUSB_BCM_APPLE) { + hdev->manufacturer = 15; hdev->setup = btbcm_setup_apple; + hdev->set_diag = btusb_bcm_set_diag; + + /* Broadcom LM_DIAG Interface numbers are hardcoded */ + data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2); + } #endif if (id->driver_info & BTUSB_INTEL) { + hdev->manufacturer = 2; hdev->setup = btusb_setup_intel; hdev->shutdown = btusb_shutdown_intel; + hdev->set_diag = btintel_set_diag_mfg; hdev->set_bdaddr = btintel_set_bdaddr; set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); } if (id->driver_info & BTUSB_INTEL_NEW) { + hdev->manufacturer = 2; hdev->send = btusb_send_frame_intel; hdev->setup = btusb_setup_intel_new; hdev->hw_error = btintel_hw_error; + hdev->set_diag = btintel_set_diag; hdev->set_bdaddr = btintel_set_bdaddr; set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); } if (id->driver_info & BTUSB_MARVELL) @@ -2697,8 +2942,10 @@ static int btusb_probe(struct usb_interface *intf, set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks); } - if (id->driver_info & BTUSB_INTEL_BOOT) + if (id->driver_info & BTUSB_INTEL_BOOT) { + hdev->manufacturer = 2; set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + } if (id->driver_info & BTUSB_ATH3012) { hdev->set_bdaddr = btusb_set_bdaddr_ath3012; @@ -2727,8 +2974,8 @@ static int btusb_probe(struct usb_interface *intf, /* AMP controllers do not support SCO packets */ data->isoc = NULL; } else { - /* Interface numbers are hardcoded in the specification */ - data->isoc = usb_ifnum_to_if(data->udev, 1); + /* Interface orders are hardcoded in the specification */ + data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1); } if (!reset) @@ -2791,6 +3038,16 @@ static int btusb_probe(struct usb_interface *intf, } } +#ifdef CONFIG_BT_HCIBTUSB_BCM + if (data->diag) { + if (!usb_driver_claim_interface(&btusb_driver, + data->diag, data)) + __set_diag_interface(hdev); + else + data->diag = NULL; + } +#endif + err = hci_register_dev(hdev); if (err < 0) { hci_free_dev(hdev); @@ -2818,12 +3075,25 @@ static void btusb_disconnect(struct usb_interface *intf) if (data->isoc) usb_set_intfdata(data->isoc, NULL); + if (data->diag) + usb_set_intfdata(data->diag, NULL); + hci_unregister_dev(hdev); - if (intf == data->isoc) + if (intf == data->intf) { + if (data->isoc) + usb_driver_release_interface(&btusb_driver, data->isoc); + if (data->diag) + usb_driver_release_interface(&btusb_driver, data->diag); + } else if (intf == data->isoc) { + if (data->diag) + usb_driver_release_interface(&btusb_driver, data->diag); usb_driver_release_interface(&btusb_driver, data->intf); - else if (data->isoc) - 
usb_driver_release_interface(&btusb_driver, data->isoc); + } else if (intf == data->diag) { + usb_driver_release_interface(&btusb_driver, data->intf); + if (data->isoc) + usb_driver_release_interface(&btusb_driver, data->isoc); + } hci_free_dev(hdev); } diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c index 6da5e4ca13ea..d776dfd51478 100644 --- a/drivers/bluetooth/hci_ath.c +++ b/drivers/bluetooth/hci_ath.c @@ -243,6 +243,7 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu) static const struct hci_uart_proto athp = { .id = HCI_UART_ATH3K, .name = "ATH3K", + .manufacturer = 69, .open = ath_open, .close = ath_close, .flush = ath_flush, diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 645e66e9a945..cb852cc750b7 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -259,8 +259,8 @@ static int bcm_set_diag(struct hci_dev *hdev, bool enable) return -ENETDOWN; skb = bt_skb_alloc(3, GFP_KERNEL); - if (IS_ERR(skb)) - return PTR_ERR(skb); + if (!skb) + return -ENOMEM; *skb_put(skb, 1) = BCM_LM_DIAG_PKT; *skb_put(skb, 1) = 0xf0; @@ -799,6 +799,7 @@ static int bcm_remove(struct platform_device *pdev) static const struct hci_uart_proto bcm_proto = { .id = HCI_UART_BCM, .name = "BCM", + .manufacturer = 15, .init_speed = 115200, .oper_speed = 4000000, .open = bcm_open, diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 2952107e3bae..4a414a5a3165 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -557,6 +557,7 @@ static int intel_setup(struct hci_uart *hu) bt_dev_dbg(hdev, "start intel_setup"); + hu->hdev->set_diag = btintel_set_diag; hu->hdev->set_bdaddr = btintel_set_bdaddr; calltime = ktime_get(); @@ -1147,6 +1148,7 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu) static const struct hci_uart_proto intel_proto = { .id = HCI_UART_INTEL, .name = "Intel", + .manufacturer = 2, .init_speed = 115200, .oper_speed = 3000000, .open = intel_open, diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 01a83a3f8a1d..96bcec5598c2 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -587,6 +587,13 @@ static int hci_uart_register_dev(struct hci_uart *hu) hdev->bus = HCI_UART; hci_set_drvdata(hdev, hu); + /* Only when vendor specific setup callback is provided, consider + * the manufacturer information valid. This avoids filling in the + * value for Ericsson when nothing is specified. 
+ */ + if (hu->proto->setup) + hdev->manufacturer = hu->proto->manufacturer; + hdev->open = hci_uart_open; hdev->close = hci_uart_close; hdev->flush = hci_uart_flush; diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index b4a0393b2862..77eae64000b3 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -947,6 +947,7 @@ static int qca_setup(struct hci_uart *hu) static struct hci_uart_proto qca_proto = { .id = HCI_UART_QCA, .name = "QCA", + .manufacturer = 29, .init_speed = 115200, .oper_speed = 3000000, .open = qca_open, diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index 2f7bb35a890e..82c92f1b65b4 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -59,6 +59,7 @@ struct hci_uart; struct hci_uart_proto { unsigned int id; const char *name; + unsigned int manufacturer; unsigned int init_speed; unsigned int oper_speed; int (*open)(struct hci_uart *hu); diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 1a82f3a17681..0ebca8ba7bc4 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL config ARM_CCI500_PMU bool "ARM CCI500 PMU support" - default y depends on (ARM && CPU_V7) || ARM64 depends on PERF_EVENTS select ARM_CCI_PMU diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c index 5837eb8a212f..85da8b983256 100644 --- a/drivers/clk/mvebu/clk-cpu.c +++ b/drivers/clk/mvebu/clk-cpu.c @@ -197,6 +197,7 @@ static void __init of_cpu_clk_setup(struct device_node *node) for_each_node_by_type(dn, "cpu") { struct clk_init_data init; struct clk *clk; + struct clk *parent_clk; char *clk_name = kzalloc(5, GFP_KERNEL); int cpu, err; @@ -208,8 +209,9 @@ static void __init of_cpu_clk_setup(struct device_node *node) goto bail_out; sprintf(clk_name, "cpu%d", cpu); + parent_clk = of_clk_get(node, 0); - cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0); + cpuclk[cpu].parent_name = __clk_get_name(parent_clk); cpuclk[cpu].clk_name = clk_name; cpuclk[cpu].cpu = cpu; cpuclk[cpu].reg_base = clock_complex_base; diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c index 7c1e1f58e2da..2fe37f708dc7 100644 --- a/drivers/clk/samsung/clk-cpu.c +++ b/drivers/clk/samsung/clk-cpu.c @@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, * the values for DIV_COPY and DIV_HPM dividers need not be set. */ div0 = cfg_data->div0; - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_HAS_DIV1) { div1 = cfg_data->div1; if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK) div1 = readl(base + E4210_DIV_CPU1) & @@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1; WARN_ON(alt_div >= MAX_DIV); - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { /* * In Exynos4210, ATB clock parent is also mout_core. So * ATB clock also needs to be mantained at safe speed. 
@@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, writel(div0, base + E4210_DIV_CPU0); wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL); - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_HAS_DIV1) { writel(div1, base + E4210_DIV_CPU1); wait_until_divider_stable(base + E4210_DIV_STAT_CPU1, DIV_MASK_ALL); @@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, unsigned long mux_reg; /* find out the divider values to use for clock data */ - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { while ((cfg_data->prate * 1000) != ndata->new_rate) { if (cfg_data->prate == 0) return -EINVAL; @@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU); wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1); - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK); div_mask |= E4210_DIV0_ATB_MASK; } diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c index 676ee8f6d813..8831e1a05367 100644 --- a/drivers/clk/ti/clk-3xxx.c +++ b/drivers/clk/ti/clk-3xxx.c @@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = { DT_CLK(NULL, "gpio2_ick", "gpio2_ick"), DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), DT_CLK(NULL, "uart3_ick", "uart3_ick"), - DT_CLK(NULL, "uart4_ick", "uart4_ick"), DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), @@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = { static struct ti_dt_clk omap36xx_clks[] = { DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"), DT_CLK(NULL, "uart4_fck", "uart4_fck"), + DT_CLK(NULL, "uart4_ick", "uart4_ick"), { .node_name = NULL }, }; diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 9b5b289e6334..a911d7de3377 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -18,7 +18,6 @@ #include "clock.h" -#define DRA7_DPLL_ABE_DEFFREQ 180633600 #define DRA7_DPLL_GMAC_DEFFREQ 1000000000 #define DRA7_DPLL_USB_DEFFREQ 960000000 @@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = { int __init dra7xx_dt_clk_init(void) { int rc; - struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck; + struct clk *dpll_ck, *hdcp_ck; ti_dt_clocks_register(dra7xx_clks); omap2_clk_disable_autoidle_all(); - abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux"); - sys_clkin2 = clk_get_sys(NULL, "sys_clkin2"); - dpll_ck = clk_get_sys(NULL, "dpll_abe_ck"); - - rc = clk_set_parent(abe_dpll_mux, sys_clkin2); - if (!rc) - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ); - if (rc) - pr_err("%s: failed to configure ABE DPLL!\n", __func__); - - dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck"); - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2); - if (rc) - pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__); - dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck"); rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ); if (rc) diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c index 90d7d8a21c49..1ddc288fce4e 100644 --- a/drivers/clk/ti/clkt_dflt.c +++ b/drivers/clk/ti/clkt_dflt.c @@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw) } } - if (unlikely(!clk->enable_reg)) { + if (unlikely(IS_ERR(clk->enable_reg))) { pr_err("%s: %s missing 
enable_reg\n", __func__, clk_hw_get_name(hw)); ret = -EINVAL; @@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw) u32 v; clk = to_clk_hw_omap(hw); - if (!clk->enable_reg) { + if (IS_ERR(clk->enable_reg)) { /* * 'independent' here refers to a clock which is not * controlled by its parent. diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c index bb2c2b050964..d3c1742ded1a 100644 --- a/drivers/clocksource/rockchip_timer.c +++ b/drivers/clocksource/rockchip_timer.c @@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np) bc_timer.freq = clk_get_rate(timer_clk); irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); return; } diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c index edacf3902e10..1cea08cf603e 100644 --- a/drivers/clocksource/timer-keystone.c +++ b/drivers/clocksource/timer-keystone.c @@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np) int irq, error; irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("%s: failed to map interrupts\n", __func__); return; } diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 798277227de7..cec1ee2d2f74 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) { struct acpi_cpufreq_data *data = policy->driver_data; + if (unlikely(!data)) + return -ENODEV; + return cpufreq_show_cpus(data->freqdomain_cpus, buf); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index ef5ed9470de9..25c4c15103a0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu) * since this is a core component, and is essential for the * subsequent light-weight ->init() to succeed. 
*/ - if (cpufreq_driver->exit) + if (cpufreq_driver->exit) { cpufreq_driver->exit(policy); + policy->freq_table = NULL; + } } /** diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3af9dd7332e6..aa33b92b3e3e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -776,6 +776,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu) local_irq_save(flags); rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); + if (cpu->prev_mperf == mperf) { + local_irq_restore(flags); + return; + } + tsc = rdtsc(); local_irq_restore(flags); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 3927ed9fdbd5..ca848cc6a8fd 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev, if (err) { put_device(&devfreq->dev); mutex_unlock(&devfreq->lock); - goto err_dev; + goto err_out; } mutex_unlock(&devfreq->lock); @@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev, err_init: list_del(&devfreq->node); device_unregister(&devfreq->dev); -err_dev: kfree(devfreq); err_out: return ERR_PTR(err); @@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, ret = PTR_ERR(governor); goto out; } - if (df->governor == governor) + if (df->governor == governor) { + ret = 0; goto out; + } if (df->governor) { ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index a165b4bfd330..dd24375b76dd 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, return desc; } +void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) +{ + memset(&desc->lld, 0, sizeof(desc->lld)); + INIT_LIST_HEAD(&desc->descs_list); + desc->direction = DMA_TRANS_NONE; + desc->xfer_size = 0; + desc->active_xfer = false; +} + /* Call must be protected by lock. 
*/ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) { @@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) desc = list_first_entry(&atchan->free_descs_list, struct at_xdmac_desc, desc_node); list_del(&desc->desc_node); - desc->active_xfer = false; + at_xdmac_init_used_desc(desc); } return desc; @@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, if (xt->src_inc) { if (xt->src_sgl) - chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM; + chan_cc |= AT_XDMAC_CC_SAM_UBS_AM; else chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; } if (xt->dst_inc) { if (xt->dst_sgl) - chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM; + chan_cc |= AT_XDMAC_CC_DAM_UBS_AM; else chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ff284c8e3d5..09479d4be4db 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) mutex_lock(&dma_list_mutex); if (chan->client_count == 0) { + struct dma_device *device = chan->device; + + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; err = dma_chan_get(chan); - if (err) + if (err) { pr_debug("%s: failed to get %s: (%d)\n", __func__, dma_chan_name(chan), err); + chan = NULL; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); + } } else chan = NULL; diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cf1c87fa1edd..bedce038c6e2 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; - int r = nr_channels - i - 1; dwc->chan.device = &dw->dma; dma_cookie_init(&dwc->chan); @@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* 7 is highest priority & 0 is lowest. 
*/ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) - dwc->priority = r; + dwc->priority = nr_channels - i - 1; else dwc->priority = i; @@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* Hardware configuration */ if (autocfg) { unsigned int dwc_params; + unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; void __iomem *addr = chip->regs + r * sizeof(u32); dwc_params = dma_read_byaddr(addr, DWC_PARAMS); diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 18c14e1f1414..48d6d9e94f67 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c) struct idma64_desc *desc = idma64c->desc; struct idma64_hw_desc *hw; size_t bytes = desc->length; - u64 llp; - u32 ctlhi; + u64 llp = channel_readq(idma64c, LLP); + u32 ctlhi = channel_readl(idma64c, CTL_HI); unsigned int i = 0; - llp = channel_readq(idma64c, LLP); do { hw = &desc->hw[i]; - } while ((hw->llp != llp) && (++i < desc->ndesc)); + if (hw->llp == llp) + break; + bytes -= hw->len; + } while (++i < desc->ndesc); if (!i) return bytes; - do { - bytes -= desc->hw[--i].len; - } while (i); + /* The current chunk is not fully transfered yet */ + bytes += desc->hw[--i].len; - ctlhi = channel_readl(idma64c, CTL_HI); return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); } diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 5cb61ce01036..fc4156afa070 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan) return; /* clear the channel mapping in DRCMR */ - reg = pxad_drcmr(chan->drcmr); - writel_relaxed(0, chan->phy->base + reg); + if (chan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(chan->drcmr); + writel_relaxed(0, chan->phy->base + reg); + } spin_lock_irqsave(&pdev->phy_lock, flags); for (i = 0; i < 32; i++) @@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned) "%s(); phy=%p(%d) misaligned=%d\n", __func__, phy, phy->idx, misaligned); - reg = pxad_drcmr(phy->vchan->drcmr); - writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + if (phy->vchan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(phy->vchan->drcmr); + writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + } dalgn = phy_readl_relaxed(phy, DALGN); if (misaligned) @@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd, struct dma_async_tx_descriptor *tx; struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); + INIT_LIST_HEAD(&vd->node); tx = vchan_tx_prep(vc, vd, tx_flags); tx->tx_submit = pxad_tx_submit; dev_dbg(&chan->vc.chan.dev->device, @@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan, width = chan->cfg.src_addr_width; dev_addr = chan->cfg.src_addr; *dev_src = dev_addr; - *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; + *dcmd |= PXA_DCMD_INCTRGADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWSRC; } if (dir == DMA_MEM_TO_DEV) { maxburst = chan->cfg.dst_maxburst; width = chan->cfg.dst_addr_width; dev_addr = chan->cfg.dst_addr; *dev_dst = dev_addr; - *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; + *dcmd |= PXA_DCMD_INCSRCADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWTRG; } if (dir == DMA_MEM_TO_MEM) *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | @@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan, else curr = phy_readl_relaxed(chan->phy, DTADR); + /* + * curr has to be actually read before 
checking descriptor + * completion, so that a curr inside a status updater + * descriptor implies the following test returns true, and + * preventing reordering of curr load and the test. + */ + rmb(); + if (is_desc_completed(vd)) + goto out; + for (i = 0; i < sw_desc->nb_desc - 1; i++) { hw_desc = sw_desc->hw_desc[i]; if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index a1a500d96ff2..1661d518224a 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract) static void sun4i_dma_free_contract(struct virt_dma_desc *vd) { struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); - struct sun4i_dma_promise *promise; + struct sun4i_dma_promise *promise, *tmp; /* Free all the demands and completed demands */ - list_for_each_entry(promise, &contract->demands, list) + list_for_each_entry_safe(promise, tmp, &contract->demands, list) kfree(promise); - list_for_each_entry(promise, &contract->completed_demands, list) + list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list) kfree(promise); kfree(contract); diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index b23e8d52d126..8d57b1b12e41 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -59,7 +59,6 @@ #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF -#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1) #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) #define XGENE_DMA_RING_CMD_OFFSET 0x2C @@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) return flyby_type[src_cnt]; } -static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) -{ - u32 __iomem *cmd_base = ring->cmd_base; - u32 ring_state = ioread32(&cmd_base[1]); - - return XGENE_DMA_RING_DESC_CNT(ring_state); -} - static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, dma_addr_t *paddr) { @@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan, dma_pool_free(chan->desc_pool, desc, desc->tx.phys); } -static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, - struct xgene_dma_desc_sw *desc_sw) +static void xgene_chan_xfer_request(struct xgene_dma_chan *chan, + struct xgene_dma_desc_sw *desc_sw) { + struct xgene_dma_ring *ring = &chan->tx_ring; struct xgene_dma_desc_hw *desc_hw; - /* Check if can push more descriptor to hw for execution */ - if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) - return -EBUSY; - /* Get hw descriptor from DMA tx ring */ desc_hw = &ring->desc_hw[ring->head]; @@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); } + /* Increment the pending transaction count */ + chan->pending += ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); + /* Notify the hw that we have descriptor ready for execution */ iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? 
2 : 1, ring->cmd); - - return 0; } /** @@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) { struct xgene_dma_desc_sw *desc_sw, *_desc_sw; - int ret; /* * If the list of pending descriptors is empty, then we @@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) if (chan->pending >= chan->max_outstanding) return; - ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); - if (ret) - return; + xgene_chan_xfer_request(chan, desc_sw); /* * Delete this element from ld pending queue and append it to * ld running queue */ list_move_tail(&desc_sw->node, &chan->ld_running); - - /* Increment the pending transaction count */ - chan->pending++; } } @@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) * Decrement the pending transaction count * as we have processed one */ - chan->pending--; + chan->pending -= ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); /* * Delete this node from ld running queue and append it to @@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, struct xgene_dma_ring *ring, enum xgene_dma_ring_cfgsize cfgsize) { + int ret; + /* Setup DMA ring descriptor variables */ ring->pdma = chan->pdma; ring->cfgsize = cfgsize; ring->num = chan->pdma->ring_num++; ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); - ring->size = xgene_dma_get_ring_size(chan, cfgsize); - if (ring->size <= 0) - return ring->size; + ret = xgene_dma_get_ring_size(chan, cfgsize); + if (ret <= 0) + return ret; + ring->size = ret; /* Allocate memory for DMA ring descriptor */ ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, @@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan) tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); /* Set the max outstanding request possible to this channel */ - chan->max_outstanding = rx_ring->slots; + chan->max_outstanding = tx_ring->slots; return ret; } diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index 39915a6b7986..c017fcd8e07c 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c @@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct dma_chan *chan; struct zx_dma_chan *c; - if (request > d->dma_requests) + if (request >= d->dma_requests) return NULL; chan = dma_get_any_slave_channel(&d->slave); diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index e29560e6b40b..950c87f5d279 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -13,6 +13,7 @@ */ #include <linux/efi.h> +#include <linux/sort.h> #include <asm/efi.h> #include "efistub.h" @@ -305,6 +306,44 @@ fail: */ #define EFI_RT_VIRTUAL_BASE 0x40000000 +static int cmp_mem_desc(const void *l, const void *r) +{ + const efi_memory_desc_t *left = l, *right = r; + + return (left->phys_addr > right->phys_addr) ? 1 : -1; +} + +/* + * Returns whether region @left ends exactly where region @right starts, + * or false if either argument is NULL. 
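[Editor's note] The xgene-dma hunks above drop the ring-state poll and instead keep a simple count: chan->pending grows by 2 for a 64-byte descriptor and by 1 otherwise when a request is pushed, shrinks by the same amount on completion, and is capped by the tx ring's slot count. A minimal sketch of that symmetric accounting follows; the flag name and helpers are invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_FLAG_64B_DESC 0x1

    struct demo_chan {
        int pending;
        int max_outstanding;    /* e.g. tx ring slot count */
    };

    static int slots_for(unsigned int flags)
    {
        return (flags & DEMO_FLAG_64B_DESC) ? 2 : 1;
    }

    static bool demo_submit(struct demo_chan *chan, unsigned int flags)
    {
        if (chan->pending >= chan->max_outstanding)
            return false;       /* ring full: leave it on the pending list */
        chan->pending += slots_for(flags);
        return true;
    }

    static void demo_complete(struct demo_chan *chan, unsigned int flags)
    {
        chan->pending -= slots_for(flags);
    }

    int main(void)
    {
        struct demo_chan chan = { .pending = 0, .max_outstanding = 4 };

        demo_submit(&chan, DEMO_FLAG_64B_DESC); /* takes 2 slots */
        demo_submit(&chan, 0);                  /* takes 1 slot */
        printf("pending after submits: %d\n", chan.pending);

        demo_complete(&chan, DEMO_FLAG_64B_DESC);
        demo_complete(&chan, 0);
        printf("pending after completions: %d\n", chan.pending);
        return 0;
    }

Because increment and decrement use the same per-descriptor slot count, the counter stays balanced regardless of how many 64-byte descriptors are in flight.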
+ */ +static bool regions_are_adjacent(efi_memory_desc_t *left, + efi_memory_desc_t *right) +{ + u64 left_end; + + if (left == NULL || right == NULL) + return false; + + left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE; + + return left_end == right->phys_addr; +} + +/* + * Returns whether region @left and region @right have compatible memory type + * mapping attributes, and are both EFI_MEMORY_RUNTIME regions. + */ +static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left, + efi_memory_desc_t *right) +{ + static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT | + EFI_MEMORY_WC | EFI_MEMORY_UC | + EFI_MEMORY_RUNTIME; + + return ((left->attribute ^ right->attribute) & mem_type_mask) == 0; +} + /* * efi_get_virtmap() - create a virtual mapping for the EFI memory map * @@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, int *count) { u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; - efi_memory_desc_t *out = runtime_map; + efi_memory_desc_t *in, *prev = NULL, *out = runtime_map; int l; - for (l = 0; l < map_size; l += desc_size) { - efi_memory_desc_t *in = (void *)memory_map + l; + /* + * To work around potential issues with the Properties Table feature + * introduced in UEFI 2.5, which may split PE/COFF executable images + * in memory into several RuntimeServicesCode and RuntimeServicesData + * regions, we need to preserve the relative offsets between adjacent + * EFI_MEMORY_RUNTIME regions with the same memory type attributes. + * The easiest way to find adjacent regions is to sort the memory map + * before traversing it. + */ + sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); + + for (l = 0; l < map_size; l += desc_size, prev = in) { u64 paddr, size; + in = (void *)memory_map + l; if (!(in->attribute & EFI_MEMORY_RUNTIME)) continue; + paddr = in->phys_addr; + size = in->num_pages * EFI_PAGE_SIZE; + /* * Make the mapping compatible with 64k pages: this allows * a 4k page size kernel to kexec a 64k page size kernel and * vice versa. */ - paddr = round_down(in->phys_addr, SZ_64K); - size = round_up(in->num_pages * EFI_PAGE_SIZE + - in->phys_addr - paddr, SZ_64K); - - /* - * Avoid wasting memory on PTEs by choosing a virtual base that - * is compatible with section mappings if this region has the - * appropriate size and physical alignment. (Sections are 2 MB - * on 4k granule kernels) - */ - if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) - efi_virt_base = round_up(efi_virt_base, SZ_2M); + if (!regions_are_adjacent(prev, in) || + !regions_have_compatible_memory_type_attrs(prev, in)) { + + paddr = round_down(in->phys_addr, SZ_64K); + size += in->phys_addr - paddr; + + /* + * Avoid wasting memory on PTEs by choosing a virtual + * base that is compatible with section mappings if this + * region has the appropriate size and physical + * alignment. 
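[Editor's note] The arm-stub.c change sorts the memory map by physical address and only re-rounds the virtual base when the next EFI_MEMORY_RUNTIME region is not adjacent to (or not attribute-compatible with) the previous one, so regions that belong to one split PE/COFF image keep their relative offsets. Below is a compact user-space model of that assignment loop, simplified to the adjacency check only (the real code also compares memory-type attributes and prefers 2 MB alignment for large aligned regions); the region struct and values are invented for the example.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SZ_64K 0x10000ULL

    struct region {
        uint64_t phys;
        uint64_t size;
        uint64_t virt;
    };

    static uint64_t rnd_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }
    static uint64_t rnd_up(uint64_t x, uint64_t a) { return (x + a - 1) & ~(a - 1); }

    /* never returns 0, mirroring the strict ordering of cmp_mem_desc() */
    static int cmp_region(const void *l, const void *r)
    {
        const struct region *a = l, *b = r;

        return (a->phys > b->phys) ? 1 : -1;
    }

    int main(void)
    {
        /* two adjacent chunks of one image, plus one unrelated region */
        struct region map[] = {
            { .phys = 0x80200000, .size = 0x1000 },
            { .phys = 0x80201000, .size = 0x3000 },
            { .phys = 0x90000000, .size = 0x2000 },
        };
        size_t n = sizeof(map) / sizeof(map[0]);
        uint64_t virt_base = 0x40000000;
        struct region *prev = NULL;
        size_t i;

        qsort(map, n, sizeof(map[0]), cmp_region);

        for (i = 0; i < n; i++) {
            struct region *in = &map[i];
            uint64_t paddr = in->phys;
            uint64_t size = in->size;

            /* only restart the 64K-aligned window when regions do not abut */
            if (!prev || prev->phys + prev->size != in->phys) {
                paddr = rnd_down(in->phys, SZ_64K);
                size += in->phys - paddr;
                virt_base = rnd_up(virt_base, SZ_64K);
            }
            in->virt = virt_base + in->phys - paddr;
            virt_base += size;
            prev = in;
        }

        for (i = 0; i < n; i++)
            printf("phys %#llx -> virt %#llx\n",
                   (unsigned long long)map[i].phys,
                   (unsigned long long)map[i].virt);
        return 0;
    }

Running this shows the two abutting regions ending up 0x1000 apart in virtual space as well, while the unrelated region starts on a fresh 64 KiB boundary.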
(Sections are 2 MB on 4k granule kernels) + */ + if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) + efi_virt_base = round_up(efi_virt_base, SZ_2M); + else + efi_virt_base = round_up(efi_virt_base, SZ_64K); + } in->virt_addr = efi_virt_base + in->phys_addr - paddr; efi_virt_base += size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 77f1d7c6ea3a..9416e0f5c1db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) /* disp clock */ adev->clock.default_dispclk = le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); - if (adev->clock.default_dispclk == 0) - adev->clock.default_dispclk = 54000; /* 540 Mhz */ + /* set a reasonable default for DP */ + if (adev->clock.default_dispclk < 53900) { + DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", + adev->clock.default_dispclk / 100); + adev->clock.default_dispclk = 60000; + } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); adev->clock.current_dispclk = adev->clock.default_dispclk; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 1c3fc99c5465..8e995148f56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, return ret; } -static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle) -{ - CGS_FUNC_ADEV; - int r; - uint32_t dma_handle; - struct drm_gem_object *obj; - struct amdgpu_bo *bo; - struct drm_device *dev = adev->ddev; - struct drm_file *file_priv = NULL, *priv; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(priv, &dev->filelist, lhead) { - rcu_read_lock(); - if (priv->pid == get_pid(task_pid(current))) - file_priv = priv; - rcu_read_unlock(); - if (file_priv) - break; - } - mutex_unlock(&dev->struct_mutex); - r = dev->driver->prime_fd_to_handle(dev, - file_priv, dmabuf_fd, - &dma_handle); - spin_lock(&file_priv->table_lock); - - /* Check if we currently have a reference on the object */ - obj = idr_find(&file_priv->object_idr, dma_handle); - if (obj == NULL) { - spin_unlock(&file_priv->table_lock); - return -EINVAL; - } - spin_unlock(&file_priv->table_lock); - bo = gem_to_amdgpu_bo(obj); - *handle = (cgs_handle_t)bo; - return 0; -} - static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) { struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; @@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = { }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { - amdgpu_cgs_import_gpu_mem, amdgpu_cgs_add_irq_source, amdgpu_cgs_irq_get, amdgpu_cgs_irq_put diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 749420f1ea6f..fd16652aa277 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) uint64_t *chunk_array_user; uint64_t *chunk_array; struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - unsigned size, i; + unsigned size; + int i; int ret; if (cs->in.num_chunks == 0) @@ -176,7 +177,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) /* get chunks */ INIT_LIST_HEAD(&p->validated); - chunk_array_user = (uint64_t __user *)(cs->in.chunks); + chunk_array_user = 
(uint64_t __user *)(unsigned long)(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { ret = -EFAULT; @@ -196,7 +197,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) struct drm_amdgpu_cs_chunk user_chunk; uint32_t __user *cdata; - chunk_ptr = (void __user *)chunk_array[i]; + chunk_ptr = (void __user *)(unsigned long)chunk_array[i]; if (copy_from_user(&user_chunk, chunk_ptr, sizeof(struct drm_amdgpu_cs_chunk))) { ret = -EFAULT; @@ -207,7 +208,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) p->chunks[i].length_dw = user_chunk.length_dw; size = p->chunks[i].length_dw; - cdata = (void __user *)user_chunk.chunk_data; + cdata = (void __user *)(unsigned long)user_chunk.chunk_data; p->chunks[i].user_ptr = cdata; p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index e3d70772b531..dc29ed8145c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work) /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); - /* set the proper interrupt */ - amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id); /* do the flip (mmio) */ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); /* set the flip status */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index adb48353f2e1..b190c2a83680 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -242,11 +242,11 @@ static struct pci_device_id pciidlist[] = { {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, #endif /* topaz */ - {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, /* tonga */ {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 8a122b1b7786..96290d9cddca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) return true; return false; } + +void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) +{ + struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev; + struct drm_fb_helper *fb_helper; + int ret; + + if (!afbdev) + return; + + fb_helper = &afbdev->helper; + + ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + if (ret) + DRM_DEBUG("failed to restore crtc mode\n"); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 8c735f544b66..5d11e798230c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -485,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * Outdated mess for old drm with Xorg being in charge (void function now). */ /** - * amdgpu_driver_firstopen_kms - drm callback for last close + * amdgpu_driver_lastclose_kms - drm callback for last close * * @dev: drm dev pointer * @@ -493,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file */ void amdgpu_driver_lastclose_kms(struct drm_device *dev) { + struct amdgpu_device *adev = dev->dev_private; + + amdgpu_fbdev_restore_mode(adev); vga_switcheroo_process_delayed_switch(); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 64efe5b52e65..7bd470d9ac30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev); void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state); int amdgpu_fbdev_total_size(struct amdgpu_device *adev); bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj); +void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev); void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1e14531353e0..53d551f2d839 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -455,8 +455,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, return -ENOMEM; r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); - if (r) + if (r) { + kfree(ib); return r; + } ib->length_dw = 0; /* walk over the address space and update the page directory */ diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index cd6edc40c9cd..1e0bba29e167 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action) amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - amdgpu_atombios_encoder_setup_dig_transmitter(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level); if (ext_encoder) amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 82e8d0730517..a1a35a5df8e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle) if (!amdgpu_dpm) return 0; + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; + ret = ci_set_temperature_range(adev); if (ret) return ret; @@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_failed; mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git 
a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 4b6ce74753cd..484710cfdf82 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) int ret, i; u16 tmp16; + if (pci_is_root_bus(adev->pdev->bus)) + return; + if (amdgpu_pcie_gen2 == 0) return; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 44fa96ad4709..2e3373ed4c94 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (amdgpu_dpm) { + int ret; + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; + /* powerdown unused blocks for now */ cz_dpm_powergate_uvd(adev, true); cz_dpm_powergate_vce(adev, true); @@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle) if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_init_failed; - mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index e4d101b1252a..d4c82b625727 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v10_0_page_flip - pageflip callback. 
* @@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v10_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v10_0_crtc_load_lut(crtc); break; @@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle) dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v10_0_pageflip_interrupt_init(adev); + return 0; } @@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle) dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v10_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle) dce_v10_0_hpd_fini(adev); + dce_v10_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle) /* initialize hpd */ dce_v10_0_hpd_init(adev); + dce_v10_0_pageflip_interrupt_init(adev); + return 0; } @@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 6411e8244671..7e1cf5e4eebf 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v11_0_page_flip - pageflip callback. 
* @@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v11_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v11_0_crtc_load_lut(crtc); break; @@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle) switch (adev->asic_type) { case CHIP_CARRIZO: - adev->mode_info.num_crtc = 4; + adev->mode_info.num_crtc = 3; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; @@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle) dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v11_0_pageflip_interrupt_init(adev); + return 0; } @@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle) dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v11_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle) dce_v11_0_hpd_fini(adev); + dce_v11_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle) /* initialize hpd */ dce_v11_0_hpd_init(adev); + dce_v11_0_pageflip_interrupt_init(adev); + return 0; } @@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index c86911c2ea2a..34b9c2a9d8d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v8_0_page_flip - pageflip callback. 
* @@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v8_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v8_0_crtc_load_lut(crtc); break; @@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle) dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v8_0_pageflip_interrupt_init(adev); + return 0; } @@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle) dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v8_0_pageflip_interrupt_fini(adev); + return 0; } @@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle) dce_v8_0_hpd_fini(adev); + dce_v8_0_pageflip_interrupt_fini(adev); + return 0; } @@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle) /* initialize hpd */ dce_v8_0_hpd_init(adev); + dce_v8_0_pageflip_interrupt_init(adev); + return 0; } @@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 774528ab8704..fab5471d25d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9a07742620d0..7bc9e9fcf3d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v8_0_vm_decode_fault(adev, 
status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 94ec04a9c4d5..9745ed3a9aef 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2995,6 +2995,12 @@ static int kv_dpm_late_init(void *handle) { /* powerdown unused blocks for now */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; kv_dpm_powergate_acp(adev, true); kv_dpm_powergate_samu(adev, true); @@ -3038,9 +3044,6 @@ static int kv_dpm_sw_init(void *handle) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_failed; mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index b55ceb14fdcd..0bac8702e934 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev) u32 mask; int ret; + if (pci_is_root_bus(adev->pdev->bus)) + return; + if (amdgpu_pcie_gen2 == 0) return; diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h index 488642f08267..3b47ae313e36 100644 --- a/drivers/gpu/drm/amd/include/cgs_linux.h +++ b/drivers/gpu/drm/amd/include/cgs_linux.h @@ -27,19 +27,6 @@ #include "cgs_common.h" /** - * cgs_import_gpu_mem() - Import dmabuf handle - * @cgs_device: opaque device handle - * @dmabuf_fd: DMABuf file descriptor - * @handle: memory handle (output) - * - * Must be called in the process context that dmabuf_fd belongs to. 
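[Editor's note] In the gmc_v7_0/gmc_v8_0 interrupt handlers above, the write that resets VM_CONTEXT1_CNTL2 moves ahead of the error prints, and the handler now returns quietly when both the latched fault address and status are zero, treating that as a spurious or already-acknowledged fault. A tiny sketch of that "latch, acknowledge, then filter" ordering, with made-up register state in place of MMIO:

    #include <stdint.h>
    #include <stdio.h>

    /* pretend fault-latch registers for the example */
    static uint32_t fault_addr_reg;
    static uint32_t fault_status_reg;

    static void demo_ack(void)
    {
        fault_addr_reg = 0;
        fault_status_reg = 0;
    }

    static int demo_handle_fault(void)
    {
        uint32_t addr = fault_addr_reg;
        uint32_t status = fault_status_reg;

        /* acknowledge first, so a refiring interrupt sees cleared state */
        demo_ack();

        /* nothing latched: spurious or already handled, stay quiet */
        if (!addr && !status)
            return 0;

        fprintf(stderr, "fault: addr=0x%08x status=0x%08x\n", addr, status);
        return 0;
    }

    int main(void)
    {
        fault_addr_reg = 0x1000;
        fault_status_reg = 0x3;
        demo_handle_fault();    /* reports the fault once */
        demo_handle_fault();    /* second run sees zeros and is silent */
        return 0;
    }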
- * - * Return: 0 on success, -errno otherwise - */ -typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle); - -/** * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources * @private_data: private data provided to cgs_add_irq_source * @src_id: interrupt source ID @@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type); typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); struct cgs_os_ops { - cgs_import_gpu_mem_t import_gpu_mem; - /* IRQ handling */ cgs_add_irq_source_t add_irq_source; cgs_irq_get_t irq_get; cgs_irq_put_t irq_put; }; -#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \ - CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle) #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ private_data) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e23df5fd3836..5bca390d9ae2 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) struct drm_dp_mst_port *port, *tmp; bool wake_tx = false; - cancel_work_sync(&mstb->mgr->work); - /* * destroy all ports - don't need lock * as there are no more references to the mst branch @@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref) { struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; + if (!port->input) { port->vcpi.num_slots = 0; kfree(port->cached_edid); - /* we can't destroy the connector here, as - we might be holding the mode_config.mutex - from an EDID retrieval */ + /* + * The only time we don't have a connector + * on an output port is if the connector init + * fails. 
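[Editor's note] The drm_dp_mst_topology.c changes in this area stop tearing connectors down directly from the port release path, where mode_config.mutex may already be held, and instead queue the port on a destroy list guarded by its own lock; a work item later does the teardown outside that lock and emits a single hotplug if anything was freed. The sketch below models only that hand-off pattern; the list, lock, and function names are invented and none of this is the DRM API.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_port {
        int id;
        struct demo_port *next;
    };

    static pthread_mutex_t destroy_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct demo_port *destroy_list;

    /* called from a context that may hold other locks: only queue, never free */
    static void demo_defer_destroy(struct demo_port *port)
    {
        pthread_mutex_lock(&destroy_lock);
        port->next = destroy_list;
        destroy_list = port;
        pthread_mutex_unlock(&destroy_lock);
        /* a real driver would schedule_work() here */
    }

    /* the "work item": pop entries one at a time, free them outside the lock,
     * and send a single notification at the end if anything was freed */
    static void demo_destroy_work(void)
    {
        bool freed = false;

        for (;;) {
            struct demo_port *port;

            pthread_mutex_lock(&destroy_lock);
            port = destroy_list;
            if (port)
                destroy_list = port->next;
            pthread_mutex_unlock(&destroy_lock);

            if (!port)
                break;

            printf("tearing down port %d\n", port->id);
            free(port);
            freed = true;
        }

        if (freed)
            printf("hotplug event (sent once)\n");
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct demo_port *p = malloc(sizeof(*p));

            if (!p)
                return 1;
            p->id = i;
            demo_defer_destroy(p);
        }
        demo_destroy_work();
        return 0;
    }

Freeing outside the lock avoids the AB-BA ordering problem the original comment warns about, and coalescing the notification means one hotplug per batch rather than one per port.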
+ */ if (port->connector) { + /* we can't destroy the connector here, as + * we might be holding the mode_config.mutex + * from an EDID retrieval */ + mutex_lock(&mgr->destroy_connector_lock); list_add(&port->next, &mgr->destroy_connector_list); mutex_unlock(&mgr->destroy_connector_lock); schedule_work(&mgr->destroy_connector_work); return; } + /* no need to clean up vcpi + * as if we have no connector we never setup a vcpi */ drm_dp_port_teardown_pdt(port, port->pdt); - - if (!port->input && port->vcpi.vcpi > 0) - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } kfree(port); - - (*mgr->cbs->hotplug)(mgr); } static void drm_dp_put_port(struct drm_dp_mst_port *port) @@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, } } -static void build_mst_prop_path(struct drm_dp_mst_port *port, - struct drm_dp_mst_branch *mstb, +static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, + int pnum, char *proppath, size_t proppath_size) { @@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port, snprintf(temp, sizeof(temp), "-%d", port_num); strlcat(proppath, temp, proppath_size); } - snprintf(temp, sizeof(temp), "-%d", port->port_num); + snprintf(temp, sizeof(temp), "-%d", pnum); strlcat(proppath, temp, proppath_size); } @@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_port_teardown_pdt(port, old_pdt); ret = drm_dp_port_setup_pdt(port); - if (ret == true) { + if (ret == true) drm_dp_send_link_address(mstb->mgr, port->mstb); - port->mstb->link_address_sent = true; - } } if (created && !port->input) { char proppath[255]; - build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); - if (port->port_num >= 8) { + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); + if (!port->connector) { + /* remove it from the port list */ + mutex_lock(&mstb->mgr->lock); + list_del(&port->next); + mutex_unlock(&mstb->mgr->lock); + /* drop port list reference */ + drm_dp_put_port(port); + goto out; + } + if (port->port_num >= DP_MST_LOGICAL_PORT_0) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); + drm_mode_connector_set_tile_property(port->connector); } + (*mstb->mgr->cbs->register_connector)(port->connector); } +out: /* put reference to this port */ drm_dp_put_port(port); } @@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m { struct drm_dp_mst_port *port; struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) { + if (!mstb->link_address_sent) drm_dp_send_link_address(mgr, mstb); - mstb->link_address_sent = true; - } + list_for_each_entry(port, &mstb->ports, next) { if (port->input) continue; @@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->qlock); } -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb) +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) { int len; struct drm_dp_sideband_msg_tx *txmsg; @@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return -ENOMEM; + return; txmsg->dst = mstb; len = build_link_address(txmsg); + mstb->link_address_sent = true; 
drm_dp_queue_down_tx(mgr, txmsg); ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); @@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, } (*mgr->cbs->hotplug)(mgr); } - } else + } else { + mstb->link_address_sent = false; DRM_DEBUG_KMS("link address failed %d\n", ret); + } kfree(txmsg); - return 0; } static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, @@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->work); + flush_work(&mgr->destroy_connector_work); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); @@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ if (port->cached_edid) edid = drm_edid_duplicate(port->cached_edid); - else + else { edid = drm_get_edid(connector, &port->aux.ddc); - - drm_mode_connector_set_tile_property(connector); + drm_mode_connector_set_tile_property(connector); + } drm_dp_put_port(port); return edid; } @@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); struct drm_dp_mst_port *port; - + bool send_hotplug = false; /* * Not a regular list traverse as we have to drop the destroy * connector lock before destroying the connector, to avoid AB->BA @@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) if (!port->input && port->vcpi.vcpi > 0) drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); kfree(port); + send_hotplug = true; } + if (send_hotplug) + (*mgr->cbs->hotplug)(mgr); } /** @@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); */ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { + flush_work(&mgr->work); flush_work(&mgr->destroy_connector_work); mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); @@ -2782,12 +2801,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs if (msgs[num - 1].flags & I2C_M_RD) reading = true; - if (!reading) { + if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) { DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); ret = -EIO; goto out; } + memset(&msg, 0, sizeof(msg)); msg.req_type = DP_REMOTE_I2C_READ; msg.u.i2c_read.num_transactions = num - 1; msg.u.i2c_read.port_number = port->port_num; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 418d299f3b12..ca08c472311b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) struct drm_crtc *crtc = mode_set->crtc; int ret; - if (crtc->funcs->cursor_set) { + if (crtc->funcs->cursor_set2) { + ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); + if (ret) + error = true; + } else if (crtc->funcs->cursor_set) { ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); if (ret) error = true; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d734780b31c0..a18164f2f6d2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) } #define DRM_OUTPUT_POLL_PERIOD (10*HZ) -static void __drm_kms_helper_poll_enable(struct drm_device 
*dev) +/** + * drm_kms_helper_poll_enable_locked - re-enable output polling. + * @dev: drm_device + * + * This function re-enables the output polling work without + * locking the mode_config mutex. + * + * This is like drm_kms_helper_poll_enable() however it is to be + * called from a context where the mode_config mutex is locked + * already. + */ +void drm_kms_helper_poll_enable_locked(struct drm_device *dev) { bool poll = false; struct drm_connector *connector; @@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev) if (poll) schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); } +EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); + static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, uint32_t maxX, uint32_t maxY, bool merge_type_bits) @@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect /* Re-enable polling in case the global poll config changed. */ if (drm_kms_helper_poll != dev->mode_config.poll_running) - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); dev->mode_config.poll_running = drm_kms_helper_poll; @@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable); void drm_kms_helper_poll_enable(struct drm_device *dev) { mutex_lock(&dev->mode_config.mutex); - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_kms_helper_poll_enable); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 0f6cd33b531f..684bd4a13843 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device, char *buf) { struct drm_connector *connector = to_drm_connector(device); - struct drm_device *dev = connector->dev; - uint64_t dpms_status; - int ret; + int dpms; - ret = drm_object_property_get_value(&connector->base, - dev->mode_config.dpms_property, - &dpms_status); - if (ret) - return 0; + dpms = READ_ONCE(connector->dpms); return snprintf(buf, PAGE_SIZE, "%s\n", - drm_get_dpms_name((int)dpms_status)); + drm_get_dpms_name(dpms)); } static ssize_t enabled_show(struct device *device, diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index cbdb78ef3bac..e6cbaca821a4 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -37,7 +37,6 @@ * DECON stands for Display and Enhancement controller. */ -#define DECON_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 #define WINDOWS_NR 2 @@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx, return (clkdiv < 0x100) ? 
clkdiv : 0xff; } -static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; - - return true; -} - static void decon_commit(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc) static const struct exynos_drm_crtc_ops decon_crtc_ops = { .enable = decon_enable, .disable = decon_disable, - .mode_fixup = decon_mode_fixup, .commit = decon_commit, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index d66ade0efac8..124fb9a56f02 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int exynos_dp_suspend(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_disable(&dp->encoder); - return 0; -} - -static int exynos_dp_resume(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_enable(&dp->encoder); - return 0; -} -#endif - -static const struct dev_pm_ops exynos_dp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) -}; - static const struct of_device_id exynos_dp_match[] = { { .compatible = "samsung,exynos5-dp" }, {}, @@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = { .driver = { .name = "exynos-dp", .owner = THIS_MODULE, - .pm = &exynos_dp_pm_ops, .of_match_table = exynos_dp_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index c68a6a2a9b57..7f55ba6771c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) { @@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); int exynos_drm_device_subdrv_probe(struct drm_device *dev) { @@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe); int exynos_drm_device_subdrv_remove(struct drm_device *dev) { @@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove); int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) { @@ -111,7 +107,6 @@ err: } return ret; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) { @@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) subdrv->close(dev, subdrv->dev, file); } } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 0872aa2f450f..ed28823d3b35 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc) exynos_crtc->ops->disable(exynos_crtc); } -static bool 
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (exynos_crtc->ops->mode_fixup) - return exynos_crtc->ops->mode_fixup(exynos_crtc, mode, - adjusted_mode); - - return true; -} - static void exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) { @@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .enable = exynos_drm_crtc_enable, .disable = exynos_drm_crtc_disable, - .mode_fixup = exynos_drm_crtc_mode_fixup, .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, .atomic_begin = exynos_crtc_atomic_begin, .atomic_flush = exynos_crtc_atomic_flush, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 831d2e4cacf9..ae9e6b2d3758 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, return 0; } +#ifdef CONFIG_PM_SLEEP static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) { struct drm_connector *connector; @@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev) return 0; } +#endif static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index b7ba21dfb696..6c717ba672db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -82,7 +82,6 @@ struct exynos_drm_plane { * * @enable: enable the device * @disable: disable the device - * @mode_fixup: fix mode data before applying it * @commit: set current hw specific display mode to hw. * @enable_vblank: specific driver callback for enabling vblank interrupt. * @disable_vblank: specific driver callback for disabling vblank interrupt. 
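[Editor's note] A few chunks back, dpms_show() in drm_sysfs.c is reduced to a single READ_ONCE() of connector->dpms instead of a locked property lookup: for a best-effort sysfs read, one untorn load of the cached value is sufficient. READ_ONCE() itself is a kernel macro; as a loose user-space analogue, a relaxed C11 atomic load expresses the same "read it exactly once, without a lock" intent. Names below are invented for the example.

    #include <stdatomic.h>
    #include <stdio.h>

    /* stand-in for connector->dpms, updated elsewhere by the driver */
    static _Atomic int demo_dpms = 3;   /* pretend 3 means "Off" */

    static const char *demo_dpms_name(int v)
    {
        switch (v) {
        case 0:  return "On";
        case 3:  return "Off";
        default: return "Unknown";
        }
    }

    /* sysfs-style show: read the value once, no lock, format it */
    static int demo_dpms_show(char *buf, size_t len)
    {
        int dpms = atomic_load_explicit(&demo_dpms, memory_order_relaxed);

        return snprintf(buf, len, "%s\n", demo_dpms_name(dpms));
    }

    int main(void)
    {
        char buf[32];

        demo_dpms_show(buf, sizeof(buf));
        printf("%s", buf);
        return 0;
    }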
@@ -103,9 +102,6 @@ struct exynos_drm_crtc; struct exynos_drm_crtc_ops { void (*enable)(struct exynos_drm_crtc *crtc); void (*disable)(struct exynos_drm_crtc *crtc); - bool (*mode_fixup)(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); void (*commit)(struct exynos_drm_crtc *crtc); int (*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 2a652359af64..dd3a5e6d58c8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = { .set_addr = fimc_dst_set_addr, }; -static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) -{ - DRM_DEBUG_KMS("enable[%d]\n", enable); - - if (enable) { - clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); - clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = false; - } else { - clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); - clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = true; - } - - return 0; -} - static irqreturn_t fimc_irq_handler(int irq, void *dev_id) { struct fimc_context *ctx = dev_id; @@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) +{ + DRM_DEBUG_KMS("enable[%d]\n", enable); + + if (enable) { + clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); + clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = false; + } else { + clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); + clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = true; + } + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int fimc_suspend(struct device *dev) { @@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int fimc_runtime_suspend(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 750a9e6b9e8d..3d1aba67758b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -41,7 +41,6 @@ * CPU Interface. */ -#define FIMD_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 /* position control register for hardware window 0, 2 ~ 4.*/ @@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, return (clkdiv < 0x100) ? clkdiv : 0xff; } -static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE; - - return true; -} - static void fimd_commit(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; @@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) return; val = enable ? 
DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; - writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); + writel(val, ctx->regs + DP_MIE_CLKCON); } static const struct exynos_drm_crtc_ops fimd_crtc_ops = { .enable = fimd_enable, .disable = fimd_disable, - .mode_fixup = fimd_mode_fixup, .commit = fimd_commit, .enable_vblank = fimd_enable_vblank, .disable_vblank = fimd_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3734c34aed16..c17efdb238a6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1230,7 +1229,6 @@ err: g2d_put_cmdlist(g2d, node); return ret; } -EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, out: return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index f12fbc36b120..407afedb6003 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) nr_pages = obj->size >> PAGE_SHIFT; if (!is_drm_iommu_supported(dev)) { - dma_addr_t start_addr; - unsigned int i = 0; - obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); if (!obj->pages) { DRM_ERROR("failed to allocate pages.\n"); return -ENOMEM; } + } - obj->cookie = dma_alloc_attrs(dev->dev, - obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->cookie) { - DRM_ERROR("failed to allocate buffer.\n"); + obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr, + GFP_KERNEL, &obj->dma_attrs); + if (!obj->cookie) { + DRM_ERROR("failed to allocate buffer.\n"); + if (obj->pages) drm_free_large(obj->pages); - return -ENOMEM; - } + return -ENOMEM; + } + + if (obj->pages) { + dma_addr_t start_addr; + unsigned int i = 0; start_addr = obj->dma_addr; while (i < nr_pages) { - obj->pages[i] = phys_to_page(start_addr); + obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, + start_addr)); start_addr += PAGE_SIZE; i++; } } else { - obj->pages = dma_alloc_attrs(dev->dev, obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->pages) { - DRM_ERROR("failed to allocate buffer.\n"); - return -ENOMEM; - } + obj->pages = obj->cookie; } DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", @@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", (unsigned long)obj->dma_addr, obj->size); - if (!is_drm_iommu_supported(dev)) { - dma_free_attrs(dev->dev, obj->size, obj->cookie, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - drm_free_large(obj->pages); - } else - dma_free_attrs(dev->dev, obj->size, obj->pages, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); + dma_free_attrs(dev->dev, obj->size, obj->cookie, + (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - obj->dma_addr = (dma_addr_t)NULL; + if (!is_drm_iommu_supported(dev)) + drm_free_large(obj->pages); } static int exynos_drm_gem_handle_create(struct drm_gem_object 
*obj, @@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) * once dmabuf's refcount becomes 0. */ if (obj->import_attach) - goto out; - - exynos_drm_free_buf(exynos_gem_obj); - -out: - drm_gem_free_mmap_offset(obj); + drm_prime_gem_destroy(obj, exynos_gem_obj->sgt); + else + exynos_drm_free_buf(exynos_gem_obj); /* release file pointer to gem object. */ drm_gem_object_release(obj); kfree(exynos_gem_obj); - exynos_gem_obj = NULL; } unsigned long exynos_drm_gem_get_size(struct drm_device *dev, @@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, return exynos_gem_obj->size; } - -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, +static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, unsigned long size) { struct exynos_drm_gem_obj *exynos_gem_obj; @@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, return ERR_PTR(ret); } + ret = drm_gem_create_mmap_offset(obj); + if (ret < 0) { + drm_gem_object_release(obj); + kfree(exynos_gem_obj); + return ERR_PTR(ret); + } + DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); return exynos_gem_obj; @@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, drm_gem_object_unreference_unlocked(obj); } -int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, +static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, struct vm_area_struct *vma) { struct drm_device *drm_dev = exynos_gem_obj->base.dev; @@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ struct exynos_drm_gem_obj *exynos_gem_obj; +{ + struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_exynos_gem_info *args = data; struct drm_gem_object *obj; @@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct exynos_drm_gem_obj *exynos_gem_obj; + unsigned int flags; int ret; /* @@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, args->pitch = args->width * ((args->bpp + 7) / 8); args->size = args->pitch * args->height; - if (is_drm_iommu_supported(dev)) { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, - args->size); - } else { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_CONTIG | EXYNOS_BO_WC, - args->size); - } + if (is_drm_iommu_supported(dev)) + flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; + else + flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; + exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size); if (IS_ERR(exynos_gem_obj)) { dev_warn(dev->dev, "FB allocation failed.\n"); return PTR_ERR(exynos_gem_obj); @@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, goto unlock; } - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - *offset = drm_vma_node_offset_addr(&obj->vma_node); DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); -out: drm_gem_object_unreference(obj); unlock: mutex_unlock(&dev->struct_mutex); @@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) err_close_vm: drm_gem_vm_close(vma); - drm_gem_free_mmap_offset(obj); return ret; } @@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, if (ret < 0) goto err_free_large; + exynos_gem_obj->sgt = sgt; + if (sgt->nents == 1) { 
/* always physically continuous memory if sgt->nents is 1. */ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index cd62f8410d1e..b62d1007c0e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -39,6 +39,7 @@ * - this address could be physical address without IOMMU and * device address with IOMMU. * @pages: Array of backing pages. + * @sgt: Imported sg_table. * * P.S. this object would be transferred to user as kms_bo.handle so * user can access the buffer through kms_bo.handle. @@ -52,6 +53,7 @@ struct exynos_drm_gem_obj { dma_addr_t dma_addr; struct dma_attrs dma_attrs; struct page **pages; + struct sg_table *sgt; }; struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); @@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); /* destroy a buffer with gem object */ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); -/* create a private gem object and initialize it. */ -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, - unsigned long size); - /* create a new buffer with gem object */ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, unsigned int flags, diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 425e70625388..2f5c118f4c8e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM static int rotator_clk_crtl(struct rot_context *rot, bool enable) { if (enable) { @@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int rotator_runtime_suspend(struct device *dev) { struct rot_context *rot = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 3e4be5a3becd..6ade06888432 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void intel_dp_register_mst_connector(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; drm_modeset_lock_all(dev); intel_connector_add_to_fbdev(intel_connector); drm_modeset_unlock_all(dev); drm_connector_register(&intel_connector->base); - return connector; } static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) static struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, + .register_connector = intel_dp_register_mst_connector, .destroy_connector = intel_dp_destroy_mst_connector, .hotplug = intel_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 53c0173a39fe..b17785719598 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) /* Enable 
polling and queue hotplug re-enabling. */ if (hpd_disabled) { - drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 72e0edd7bbde..7412caedcf7f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); read_pointer = ring->next_context_status_buffer; - write_pointer = status_pointer & 0x07; + write_pointer = status_pointer & GEN8_CSB_PTR_MASK; if (read_pointer > write_pointer) - write_pointer += 6; + write_pointer += GEN8_CSB_ENTRIES; spin_lock(&ring->execlist_lock); while (read_pointer < write_pointer) { read_pointer++; status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8); + (read_pointer % GEN8_CSB_ENTRIES) * 8); status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8 + 4); + (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4); if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) continue; @@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) spin_unlock(&ring->execlist_lock); WARN(submit_contexts > 2, "More than two context complete events?\n"); - ring->next_context_status_buffer = write_pointer % 6; + ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), - _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8)); + _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, + ((u32)ring->next_context_status_buffer & + GEN8_CSB_PTR_MASK) << 8)); } static int execlists_context_queue(struct drm_i915_gem_request *request) @@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u8 next_context_status_buffer_hw; I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); @@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); POSTING_READ(RING_MODE_GEN7(ring)); - ring->next_context_status_buffer = 0; + + /* + * Instead of resetting the Context Status Buffer (CSB) read pointer to + * zero, we need to read the write pointer from hardware and use its + * value because "this register is power context save restored". + * Effectively, these states have been observed: + * + * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | + * BDW | CSB regs not reset | CSB regs reset | + * CHT | CSB regs not reset | CSB regs not reset | + */ + next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) + & GEN8_CSB_PTR_MASK); + + /* + * When the CSB registers are reset (also after power-up / gpu reset), + * CSB write pointer is set to all 1's, which is not valid, use '5' in + * this special case, so the first element read is CSB[0]. 
+ */ + if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) + next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); + + ring->next_context_status_buffer = next_context_status_buffer_hw; DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 64f89f9982a2..3c63bb32ad81 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,6 +25,8 @@ #define _INTEL_LRC_H_ #define GEN8_LR_CONTEXT_ALIGN 4096 +#define GEN8_CSB_ENTRIES 6 +#define GEN8_CSB_PTR_MASK 0x07 /* Execlists regs */ #define RING_ELSP(ring) ((ring)->mmio_base+0x230) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index af7fdb3bd663..7401cf90b0db 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, } if (power_well->data == SKL_DISP_PW_1) { - intel_prepare_ddi(dev); + if (!dev_priv->power_domains.initializing) + intel_prepare_ddi(dev); gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index cc6c228e11c8..e905c00acf1a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -469,9 +469,13 @@ nouveau_display_create(struct drm_device *dev) if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; - } else { + } else + if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; + } else { + dev->mode_config.max_width = 16384; + dev->mode_config.max_height = 16384; } dev->mode_config.preferred_depth = 24; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 2791701685dc..59f27e774acb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info) return 0; } +static int +nouveau_fbcon_open(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + int ret = pm_runtime_get_sync(drm->dev->dev); + if (ret < 0 && ret != -EACCES) + return ret; + return 0; +} + +static int +nouveau_fbcon_release(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + pm_runtime_put(drm->dev->dev); + return 0; +} + static struct fb_ops nouveau_fbcon_ops = { .owner = THIS_MODULE, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = nouveau_fbcon_fillrect, @@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = { static struct fb_ops nouveau_fbcon_sw_ops = { .owner = THIS_MODULE, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c index 65af31441e9c..a7d69ce7abc1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c @@ -267,6 +267,12 @@ init_i2c(struct nvbios_init *init, int index) index = NVKM_I2C_BUS_PRI; if (init->outp && init->outp->i2c_upper_default) index = NVKM_I2C_BUS_SEC; + } else + if (index == 0x80) { + index = NVKM_I2C_BUS_PRI; + } else + if (index == 0x81) { + index = NVKM_I2C_BUS_SEC; } bus = nvkm_i2c_bus_find(i2c, index); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h index e0ec2a6b7b79..212800ecdce9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h @@ -8,7 +8,10 @@ struct nvbios_source { void *(*init)(struct nvkm_bios *, const char *); void (*fini)(void *); u32 (*read)(void *, u32 offset, u32 length, struct nvkm_bios *); + u32 (*size)(void *); bool rw; + bool ignore_checksum; + bool no_pcir; }; int nvbios_extend(struct nvkm_bios *, u32 length); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 792f017525f6..b2557e87afdd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -45,7 +45,7 @@ shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto) u32 read = mthd->func->read(data, start, limit - start, bios); bios->size = start + read; } - return bios->size >= limit; + return bios->size >= upto; } static int @@ -55,14 +55,22 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) struct nvbios_image image; int score = 1; - if (!shadow_fetch(bios, mthd, offset + 0x1000)) { - nvkm_debug(subdev, "%08x: header fetch failed\n", offset); - return 0; - } + if (mthd->func->no_pcir) { + image.base = 0; + image.type = 0; + image.size = mthd->func->size(mthd->data); + image.last = 1; + } else { + if (!shadow_fetch(bios, mthd, offset + 0x1000)) { + nvkm_debug(subdev, "%08x: header fetch failed\n", + offset); + return 0; + } - if (!nvbios_image(bios, idx, &image)) { - nvkm_debug(subdev, "image %d invalid\n", idx); - return 0; + if (!nvbios_image(bios, idx, &image)) { + nvkm_debug(subdev, "image %d invalid\n", idx); + return 0; + } } nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", image.base, image.type, image.size); @@ -74,7 +82,8 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) switch (image.type) { case 0x00: - if (nvbios_checksum(&bios->data[image.base], image.size)) { + if (!mthd->func->ignore_checksum && + nvbios_checksum(&bios->data[image.base], image.size)) { nvkm_debug(subdev, "%08x: checksum failed\n", image.base); if (mthd->func->rw) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c index bd60d7dd09f5..4bf486b57101 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c @@ -21,6 +21,7 @@ * */ #include "priv.h" + #include <core/pci.h> #if defined(__powerpc__) @@ -33,17 +34,26 @@ static u32 of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios) { struct priv *priv = data; - if (offset + length <= priv->size) { + if (offset < priv->size) { + length = min_t(u32, length, priv->size - offset); memcpy_fromio(bios->data + offset, priv->data + offset, length); return length; } return 0; } +static u32 +of_size(void *data) +{ + struct priv *priv = data; + return priv->size; +} + static void * of_init(struct nvkm_bios *bios, const char *name) { - struct pci_dev *pdev = 
bios->subdev.device->func->pci(bios->subdev.device)->pdev; + struct nvkm_device *device = bios->subdev.device; + struct pci_dev *pdev = device->func->pci(device)->pdev; struct device_node *dn; struct priv *priv; if (!(dn = pci_device_to_OF_node(pdev))) @@ -62,7 +72,10 @@ nvbios_of = { .init = of_init, .fini = (void(*)(void *))kfree, .read = of_read, + .size = of_size, .rw = false, + .ignore_checksum = true, + .no_pcir = true, }; #else const struct nvbios_source diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c index 814cb51cc873..385a90f91ed6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c @@ -35,6 +35,8 @@ static const struct nvkm_device_agp_quirk nvkm_device_agp_quirks[] = { /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 }, + /* SiS 761 does not support AGP cards, use PCI mode */ + { PCI_VENDOR_ID_SI, 0x0761, PCI_ANY_ID, PCI_ANY_ID, 0 }, {}, }; @@ -137,8 +139,10 @@ nvkm_agp_ctor(struct nvkm_pci *pci) while (quirk->hostbridge_vendor) { if (info.device->vendor == quirk->hostbridge_vendor && info.device->device == quirk->hostbridge_device && - pci->pdev->vendor == quirk->chip_vendor && - pci->pdev->device == quirk->chip_device) { + (quirk->chip_vendor == (u16)PCI_ANY_ID || + pci->pdev->vendor == quirk->chip_vendor) && + (quirk->chip_device == (u16)PCI_ANY_ID || + pci->pdev->device == quirk->chip_device)) { nvkm_info(subdev, "forcing default agp mode to %dX, " "use NvAGP=<mode> to override\n", quirk->mode); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index dd845f82cc24..183aea1abebc 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -244,6 +244,10 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, ret = qxl_bo_reserve(bo, false); if (ret) return ret; + ret = qxl_bo_pin(bo, bo->type, NULL); + qxl_bo_unreserve(bo); + if (ret) + return ret; qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, &norect, one_clip_rect, inc); @@ -257,7 +261,11 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, } drm_vblank_put(dev, qcrtc->index); - qxl_bo_unreserve(bo); + ret = qxl_bo_reserve(bo, false); + if (!ret) { + qxl_bo_unpin(bo); + qxl_bo_unreserve(bo); + } return 0; } @@ -618,7 +626,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, adjusted_mode->hdisplay, adjusted_mode->vdisplay); - if (qcrtc->index == 0) + if (bo->is_primary == false) recreate_primary = true; if (bo->surf.stride * bo->surf.height > qdev->vram_size) { diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 41c422fee31a..c4a552637c93 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -144,14 +144,17 @@ static void qxl_dirty_update(struct qxl_fbdev *qfbdev, spin_lock_irqsave(&qfbdev->dirty.lock, flags); - if (qfbdev->dirty.y1 < y) - y = qfbdev->dirty.y1; - if (qfbdev->dirty.y2 > y2) - y2 = qfbdev->dirty.y2; - if (qfbdev->dirty.x1 < x) - x = qfbdev->dirty.x1; - if (qfbdev->dirty.x2 > x2) - x2 = qfbdev->dirty.x2; + if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) && + (qfbdev->dirty.x2 - qfbdev->dirty.x1)) { + if (qfbdev->dirty.y1 < y) + y = qfbdev->dirty.y1; + if (qfbdev->dirty.y2 > y2) + y2 = qfbdev->dirty.y2; + if (qfbdev->dirty.x1 < x) + x = qfbdev->dirty.x1; + if (qfbdev->dirty.x2 > x2) + x2 = qfbdev->dirty.x2; + } qfbdev->dirty.x1 = x; qfbdev->dirty.x2 = x2; diff --git a/drivers/gpu/drm/qxl/qxl_release.c 
b/drivers/gpu/drm/qxl/qxl_release.c index b66ec331c17c..4efa8e261baf 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -307,7 +307,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); if (idr_ret < 0) return idr_ret; - bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); + bo = to_qxl_bo(entry->tv.bo); (*release)->release_offset = create_rel->release_offset + 64; @@ -316,8 +316,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, info = qxl_release_map(qdev, *release); info->id = idr_ret; qxl_release_unmap(qdev, *release, info); - - qxl_bo_unref(&bo); return 0; } diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index c3872598b85a..65adb9c72377 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) } else atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); } break; case DRM_MODE_DPMS_STANDBY: @@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - atombios_dig_transmitter_setup(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); if (ext_encoder) atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); break; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d2e9e9efc159..6743174acdbc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev) radeon_fbdev_init(rdev); drm_kms_helper_poll_init(rdev->ddev); - if (rdev->pm.dpm_enabled) { - /* do dpm late init */ - ret = radeon_pm_late_init(rdev); - if (ret) { - rdev->pm.dpm_enabled = false; - DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); - } - /* set the dpm state for PX since there won't be - * a modeset to call this. 
- */ - radeon_pm_compute_clocks(rdev); - } + /* do pm late init */ + ret = radeon_pm_late_init(rdev); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 5e09c061847f..744f5c49c664 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol { struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); struct drm_device *dev = master->base.dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector; struct drm_connector *connector; @@ -284,14 +283,22 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master); drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); + drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void radeon_dp_register_mst_connector(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + drm_modeset_lock_all(dev); radeon_fb_add_connector(rdev, connector); drm_modeset_unlock_all(dev); drm_connector_register(connector); - return connector; } static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -324,6 +331,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = radeon_dp_add_mst_connector, + .register_connector = radeon_dp_register_mst_connector, .destroy_connector = radeon_dp_destroy_mst_connector, .hotplug = radeon_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 7214858ffcea..26da2f4d7b4f 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -48,40 +48,10 @@ struct radeon_fbdev { struct radeon_device *rdev; }; -/** - * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev. - * - * @info: fbdev info - * - * This function hides the cursor on all CRTCs used by fbdev. 
- */ -static int radeon_fb_helper_set_par(struct fb_info *info) -{ - int ret; - - ret = drm_fb_helper_set_par(info); - - /* XXX: with universal plane support fbdev will automatically disable - * all non-primary planes (including the cursor) - */ - if (ret == 0) { - struct drm_fb_helper *fb_helper = info->par; - int i; - - for (i = 0; i < fb_helper->crtc_count; i++) { - struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc; - - radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); - } - } - - return ret; -} - static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = radeon_fb_helper_set_par, + .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, @@ -427,3 +397,19 @@ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector { drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector); } + +void radeon_fbdev_restore_mode(struct radeon_device *rdev) +{ + struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev; + struct drm_fb_helper *fb_helper; + int ret; + + if (!rfbdev) + return; + + fb_helper = &rfbdev->helper; + + ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + if (ret) + DRM_DEBUG("failed to restore crtc mode\n"); +} diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4a119c255ba9..0e932bf932c1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -598,7 +598,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file * Outdated mess for old drm with Xorg being in charge (void function now). */ /** - * radeon_driver_firstopen_kms - drm callback for last close + * radeon_driver_lastclose_kms - drm callback for last close * * @dev: drm dev pointer * @@ -606,6 +606,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file */ void radeon_driver_lastclose_kms(struct drm_device *dev) { + struct radeon_device *rdev = dev->dev_private; + + radeon_fbdev_restore_mode(rdev); vga_switcheroo_process_delayed_switch(); } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index aecc3e3dec0c..457b026a0972 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -980,6 +980,7 @@ int radeon_fbdev_init(struct radeon_device *rdev); void radeon_fbdev_fini(struct radeon_device *rdev); void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); +void radeon_fbdev_restore_mode(struct radeon_device *rdev); void radeon_fb_output_poll_changed(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 05751f3f8444..44489cce7458 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1326,14 +1326,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev) INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); if (rdev->pm.num_power_states > 1) { - /* where's the best place to put these? 
*/ - ret = device_create_file(rdev->dev, &dev_attr_power_profile); - if (ret) - DRM_ERROR("failed to create device file for power profile\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_method); - if (ret) - DRM_ERROR("failed to create device file for power method\n"); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for PM!\n"); } @@ -1391,20 +1383,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) goto dpm_failed; rdev->pm.dpm_enabled = true; - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); - if (ret) - DRM_ERROR("failed to create device file for dpm state\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); - if (ret) - DRM_ERROR("failed to create device file for dpm state\n"); - /* XXX: these are noops for dpm but are here for backwards compat */ - ret = device_create_file(rdev->dev, &dev_attr_power_profile); - if (ret) - DRM_ERROR("failed to create device file for power profile\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_method); - if (ret) - DRM_ERROR("failed to create device file for power method\n"); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); } @@ -1545,9 +1523,44 @@ int radeon_pm_late_init(struct radeon_device *rdev) int ret = 0; if (rdev->pm.pm_method == PM_METHOD_DPM) { - mutex_lock(&rdev->pm.mutex); - ret = radeon_dpm_late_enable(rdev); - mutex_unlock(&rdev->pm.mutex); + if (rdev->pm.dpm_enabled) { + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); + if (ret) + DRM_ERROR("failed to create device file for dpm state\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); + if (ret) + DRM_ERROR("failed to create device file for dpm state\n"); + /* XXX: these are noops for dpm but are here for backwards compat */ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + + mutex_lock(&rdev->pm.mutex); + ret = radeon_dpm_late_enable(rdev); + mutex_unlock(&rdev->pm.mutex); + if (ret) { + rdev->pm.dpm_enabled = false; + DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); + } else { + /* set the dpm state for PX since there won't be + * a modeset to call this. + */ + radeon_pm_compute_clocks(rdev); + } + } + } else { + if (rdev->pm.num_power_states > 1) { + /* where's the best place to put these? 
*/ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + } } return ret; } diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index e9115d3f67b0..e72bf46042e0 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2928,6 +2928,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, { 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index db8b49101a8b..512263919282 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -34,8 +34,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; - seq_printf(m, "fence %ld %lld\n", - atomic64_read(&vgdev->fence_drv.last_seq), + seq_printf(m, "fence %llu %lld\n", + (u64)atomic64_read(&vgdev->fence_drv.last_seq), vgdev->fence_drv.sync_seq); return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index 1da632631dac..67097c9ce9c1 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -61,7 +61,7 @@ static void virtio_timeline_value_str(struct fence *f, char *str, int size) { struct virtio_gpu_fence *fence = to_virtio_fence(f); - snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq)); + snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); } static const struct fence_ops virtio_fence_ops = { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 5ae8f921da2a..8a76821177a6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, 0, 0, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); + if (ret) { + (void) vmw_cmdbuf_man_process(man); + ret = drm_mm_insert_node_generic(&man->mm, info->node, + info->page_size, 0, 0, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + } + spin_unlock_bh(&man->lock); info->done = !ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 64b50409fa07..03f63c749c02 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -657,7 +657,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) struct vmw_resource *res = &user_srf->srf.res; *p_base = NULL; - ttm_base_object_unref(&user_srf->backup_base); + if (user_srf->backup_base) + ttm_base_object_unref(&user_srf->backup_base); vmw_resource_unreference(&res); } diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 3dd2de31a2f8..472b88285c75 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -24,6 +24,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/dmi.h> #include 
<linux/i2c.h> #include <linux/clk.h> #include <linux/clk-provider.h> @@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) } #ifdef CONFIG_ACPI +/* + * The HCNT/LCNT information coming from ACPI should be the most accurate + * for given platform. However, some systems get it wrong. On such systems + * we get better results by calculating those based on the input clock. + */ +static const struct dmi_system_id dw_i2c_no_acpi_params[] = { + { + .ident = "Dell Inspiron 7348", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"), + }, + }, + { } +}; + static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], u16 *hcnt, u16 *lcnt, u32 *sda_hold) { @@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], acpi_handle handle = ACPI_HANDLE(&pdev->dev); union acpi_object *obj; + if (dmi_check_system(dw_i2c_no_acpi_params)) + return; + if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) return; @@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev) adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; - r = i2c_add_numbered_adapter(adap); - if (r) { - dev_err(&pdev->dev, "failure adding adapter\n"); - return r; - } - if (dev->pm_runtime_disabled) { pm_runtime_forbid(&pdev->dev); } else { @@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); } + r = i2c_add_numbered_adapter(adap); + if (r) { + dev_err(&pdev->dev, "failure adding adapter\n"); + pm_runtime_disable(&pdev->dev); + return r; + } + return 0; } diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d8361dada584..d8b5a8fee1e6 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -690,15 +690,16 @@ static int rcar_i2c_probe(struct platform_device *pdev) return ret; } + pm_runtime_enable(dev); + platform_set_drvdata(pdev, priv); + ret = i2c_add_numbered_adapter(adap); if (ret < 0) { dev_err(dev, "reg adap failed: %d\n", ret); + pm_runtime_disable(dev); return ret; } - pm_runtime_enable(dev); - platform_set_drvdata(pdev, priv); - dev_info(dev, "probed\n"); return 0; diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 50bfd8cef5f2..5df819610d52 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) i2c->adap.nr = i2c->pdata->bus_num; i2c->adap.dev.of_node = pdev->dev.of_node; + platform_set_drvdata(pdev, i2c); + + pm_runtime_enable(&pdev->dev); + ret = i2c_add_numbered_adapter(&i2c->adap); if (ret < 0) { dev_err(&pdev->dev, "failed to add bus to i2c core\n"); + pm_runtime_disable(&pdev->dev); s3c24xx_i2c_deregister_cpufreq(i2c); clk_unprepare(i2c->clk); return ret; } - platform_set_drvdata(pdev, i2c); - - pm_runtime_enable(&pdev->dev); pm_runtime_enable(&i2c->adap.dev); dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev)); diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 5f89f1e3c2f2..a59c3111f7fb 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -694,12 +694,12 @@ static int i2c_device_probe(struct device *dev) goto err_clear_wakeup_irq; status = dev_pm_domain_attach(&client->dev, true); - if (status != -EPROBE_DEFER) { - status = driver->probe(client, i2c_match_id(driver->id_table, - client)); - if (status) - goto err_detach_pm_domain; - } + if (status == 
-EPROBE_DEFER) + goto err_clear_wakeup_irq; + + status = driver->probe(client, i2c_match_id(driver->id_table, client)); + if (status) + goto err_detach_pm_domain; return 0; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index b1ab13f3e182..59a2dafc8c57 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1232,14 +1232,32 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv, return true; } +static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num) +{ + enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num); + enum rdma_transport_type transport = + rdma_node_get_transport(device->node_type); + + return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB; +} + +static bool cma_protocol_roce(const struct rdma_cm_id *id) +{ + struct ib_device *device = id->device; + const int port_num = id->port_num ?: rdma_start_port(device); + + return cma_protocol_roce_dev_port(device, port_num); +} + static bool cma_match_net_dev(const struct rdma_id_private *id_priv, const struct net_device *net_dev) { const struct rdma_addr *addr = &id_priv->id.route.addr; if (!net_dev) - /* This request is an AF_IB request */ - return addr->src_addr.ss_family == AF_IB; + /* This request is an AF_IB request or a RoCE request */ + return addr->src_addr.ss_family == AF_IB || + cma_protocol_roce(&id_priv->id); return !addr->dev_addr.bound_dev_if || (net_eq(dev_net(net_dev), &init_net) && @@ -1294,6 +1312,10 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { /* Assuming the protocol is AF_IB */ *net_dev = NULL; + } else if (cma_protocol_roce_dev_port(req.device, req.port)) { + /* TODO find the net dev matching the request parameters + * through the RoCE GID table */ + *net_dev = NULL; } else { return ERR_CAST(*net_dev); } @@ -1593,11 +1615,16 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, if (ret) goto err; } else { - /* An AF_IB connection */ - WARN_ON_ONCE(ss_family != AF_IB); - - cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv), - &rt->addr.dev_addr); + if (!cma_protocol_roce(listen_id) && + cma_any_addr(cma_src_addr(id_priv))) { + rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; + rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); + ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); + } else if (!cma_any_addr(cma_src_addr(id_priv))) { + ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); + if (ret) + goto err; + } } rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); @@ -1635,13 +1662,12 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, if (ret) goto err; } else { - /* An AF_IB connection */ - WARN_ON_ONCE(ss_family != AF_IB); - - if (!cma_any_addr(cma_src_addr(id_priv))) - cma_translate_ib((struct sockaddr_ib *) - cma_src_addr(id_priv), - &id->route.addr.dev_addr); + if (!cma_any_addr(cma_src_addr(id_priv))) { + ret = cma_translate_addr(cma_src_addr(id_priv), + &id->route.addr.dev_addr); + if (ret) + goto err; + } } id_priv->state = RDMA_CM_CONNECT; diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 70acda91eb2a..6a0bdfa0ce2e 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1325,9 +1325,6 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev, "%u.%u", nesadapter->firmware_version >> 16, 
nesadapter->firmware_version & 0x000000ff); strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); - drvinfo->testinfo_len = 0; - drvinfo->eedump_len = 0; - drvinfo->regdump_len = 0; } diff --git a/drivers/infiniband/hw/usnic/usnic.h b/drivers/infiniband/hw/usnic/usnic.h index 5be13d8991bc..f903502d3883 100644 --- a/drivers/infiniband/hw/usnic/usnic.h +++ b/drivers/infiniband/hw/usnic/usnic.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h index 04a66229584e..7fe9502ce8d3 100644 --- a/drivers/infiniband/hw/usnic/usnic_abi.h +++ b/drivers/infiniband/hw/usnic/usnic_abi.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h index 393567266142..596e0ed49a8e 100644 --- a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h +++ b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. 
* - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_common_util.h b/drivers/infiniband/hw/usnic/usnic_common_util.h index 9d737ed5e55d..b54986de5f0c 100644 --- a/drivers/infiniband/hw/usnic/usnic_common_util.h +++ b/drivers/infiniband/hw/usnic/usnic_common_util.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c index 5d13860161a4..5e55b8bc6fe4 100644 --- a/drivers/infiniband/hw/usnic/usnic_debugfs.c +++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.h b/drivers/infiniband/hw/usnic/usnic_debugfs.h index 4087d24a88f6..98453e91daa6 100644 --- a/drivers/infiniband/hw/usnic/usnic_debugfs.h +++ b/drivers/infiniband/hw/usnic/usnic_debugfs.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c index e3c9bd9d3ba3..3c37dd59c04e 100644 --- a/drivers/infiniband/hw/usnic/usnic_fwd.c +++ b/drivers/infiniband/hw/usnic/usnic_fwd.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h index 93713a2230b3..3a8add9ddf46 100644 --- a/drivers/infiniband/hw/usnic/usnic_fwd.h +++ b/drivers/infiniband/hw/usnic/usnic_fwd.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h index e5a9297dd1bd..525bf272671e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib.h +++ b/drivers/infiniband/hw/usnic/usnic_ib.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 34c49b8105fe..0c15bd885035 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c index db3588df3546..85dc3f989ff7 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h index b0aafe8db0c3..b1458be1d402 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. 
* - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c index 27dc67c1689f..3412ea06116e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h index 0d09b493cd02..3d98e16cfeaf 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 7df43827cb29..f8e3211689a3 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index 0bd04efa16f3..414eaa566bd9 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_log.h b/drivers/infiniband/hw/usnic/usnic_log.h index 75777a66c684..183fcb6a952f 100644 --- a/drivers/infiniband/hw/usnic/usnic_log.h +++ b/drivers/infiniband/hw/usnic/usnic_log.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c index ddef6f77a78c..de318389a301 100644 --- a/drivers/infiniband/hw/usnic/usnic_transport.c +++ b/drivers/infiniband/hw/usnic/usnic_transport.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_transport.h b/drivers/infiniband/hw/usnic/usnic_transport.h index 7e5dc6d9f462..9a7a2d9755c0 100644 --- a/drivers/infiniband/hw/usnic/usnic_transport.h +++ b/drivers/infiniband/hw/usnic/usnic_transport.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index cb2337f0532b..645a5f6e6c88 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -7,7 +7,7 @@ * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: + * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h index 70440996e8f2..45ca7c1613a7 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c index 3a4288e0fbac..42b4b4c4e452 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h index d4f752e258fd..c0b0b876ab90 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c index 656b88c39eda..66de93fb8ea9 100644 --- a/drivers/infiniband/hw/usnic/usnic_vnic.c +++ b/drivers/infiniband/hw/usnic/usnic_vnic.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.h b/drivers/infiniband/hw/usnic/usnic_vnic.h index 14d931a8829d..a08423e478af 100644 --- a/drivers/infiniband/hw/usnic/usnic_vnic.h +++ b/drivers/infiniband/hw/usnic/usnic_vnic.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 4cd5428a2399..edc5b8565d6d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -495,6 +495,7 @@ void ipoib_dev_cleanup(struct net_device *dev); void ipoib_mcast_join_task(struct work_struct *work); void ipoib_mcast_carrier_on_task(struct work_struct *work); void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); +void ipoib_mcast_free(struct ipoib_mcast *mc); void ipoib_mcast_restart_task(struct work_struct *work); int ipoib_mcast_start_thread(struct net_device *dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index f74316e679d2..babba05d7a0e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1207,8 +1207,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) out_unlock: spin_unlock_irqrestore(&priv->lock, flags); - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { ipoib_mcast_leave(dev, mcast); + ipoib_mcast_free(mcast); + } } static void ipoib_reap_neigh(struct work_struct *work) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 136cbefe00f8..d750a86042f3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -106,7 +106,7 @@ static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv, queue_delayed_work(priv->wq, &priv->mcast_task, 0); } -static void ipoib_mcast_free(struct ipoib_mcast *mcast) +void ipoib_mcast_free(struct ipoib_mcast *mcast) { struct net_device *dev = mcast->dev; int tx_dropped = 0; diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index b76ac580703c..a8bc2fe170dd 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c @@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data) if (w->counter == 24) { /* full frame */ walkera0701_parse_frame(w); w->counter = NO_SYNC; - if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ + if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ w->counter = 0; } else { if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) @@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data) } else w->counter = NO_SYNC; } - } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) < + } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) < RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. 
*/ w->counter = 0; diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index b052afec9a11..6639b2b8528a 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c @@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev) error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); if (error) - return error; + goto err_free_keypad; res = request_mem_region(res->start, resource_size(res), pdev->name); if (!res) { diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index 867db8a91372..e317b75357a0 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ b/drivers/input/misc/pm8941-pwrkey.c @@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb, default: reset_type = PON_PS_HOLD_TYPE_HARD_RESET; break; - }; + } error = regmap_update_bits(pwrkey->regmap, pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 345df9b03aed..5adbcedcb81c 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev, dev->id.product = user_dev->id.product; dev->id.version = user_dev->id.version; - for_each_set_bit(i, dev->absbit, ABS_CNT) { + for (i = 0; i < ABS_CNT; i++) { input_abs_set_max(dev, i, user_dev->absmax[i]); input_abs_set_min(dev, i, user_dev->absmin[i]); input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c index 5f191071d44a..e4eb048d1bf6 100644 --- a/drivers/input/mouse/cyapa_gen6.c +++ b/drivers/input/mouse/cyapa_gen6.c @@ -241,14 +241,10 @@ static int cyapa_gen6_read_sys_info(struct cyapa *cyapa) memcpy(&cyapa->product_id[13], &resp_data[62], 2); cyapa->product_id[15] = '\0'; + /* Get the number of Rx electrodes. */ rotat_align = resp_data[68]; - if (rotat_align) { - cyapa->electrodes_rx = cyapa->electrodes_y; - cyapa->electrodes_rx = cyapa->electrodes_y; - } else { - cyapa->electrodes_rx = cyapa->electrodes_x; - cyapa->electrodes_rx = cyapa->electrodes_y; - } + cyapa->electrodes_rx = + rotat_align ? 
cyapa->electrodes_y : cyapa->electrodes_x; cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u; if (!cyapa->electrodes_x || !cyapa->electrodes_y || diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 73670f2aebfd..c0ec26118732 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -60,7 +60,7 @@ struct elan_transport_ops { int (*get_sm_version)(struct i2c_client *client, u8* ic_type, u8 *version); int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); - int (*get_product_id)(struct i2c_client *client, u8 *id); + int (*get_product_id)(struct i2c_client *client, u16 *id); int (*get_max)(struct i2c_client *client, unsigned int *max_x, unsigned int *max_y); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index fa945304b9a5..5e1665bbaa0b 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -40,7 +40,7 @@ #include "elan_i2c.h" #define DRIVER_NAME "elan_i2c" -#define ELAN_DRIVER_VERSION "1.6.0" +#define ELAN_DRIVER_VERSION "1.6.1" #define ETP_MAX_PRESSURE 255 #define ETP_FWIDTH_REDUCE 90 #define ETP_FINGER_WIDTH 15 @@ -76,7 +76,7 @@ struct elan_tp_data { unsigned int x_res; unsigned int y_res; - u8 product_id; + u16 product_id; u8 fw_version; u8 sm_version; u8 iap_version; @@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count, u16 *signature_address) { switch (iap_version) { + case 0x00: + case 0x06: case 0x08: *validpage_count = 512; break; + case 0x03: + case 0x07: case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: *validpage_count = 768; break; case 0x0D: *validpage_count = 896; break; + case 0x0E: + *validpage_count = 640; + break; default: /* unknown ic type clear value */ *validpage_count = 0; @@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data) error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, &data->fw_signature_address); - if (error) { - dev_err(&data->client->dev, - "unknown iap version %d\n", data->iap_version); - return error; - } + if (error) + dev_warn(&data->client->dev, + "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n", + data->iap_version, data->ic_type); return 0; } @@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev, const u8 *fw_signature; static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; + if (data->fw_validpage_count == 0) + return -EINVAL; + /* Look for a firmware with the product id appended. 
*/ fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); if (!fw_name) { diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 683c840c9dd7..a679e56c44cd 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, return 0; } -static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) +static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id) { int error; u8 val[3]; @@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) return error; } - *id = val[0]; + *id = le16_to_cpup((__le16 *)val); return 0; } diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index ff36a366b2aa..cb6aecbc1dc2 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, return 0; } -static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) +static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) { int error; u8 val[3]; @@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) return error; } - *id = val[1]; + *id = be16_to_cpup((__be16 *)val); return 0; } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 994ae7886156..6025eb430c0a 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse) struct synaptics_data *priv = psmouse->private; priv->mode = 0; - - if (priv->absolute_mode) { + if (priv->absolute_mode) priv->mode |= SYN_BIT_ABSOLUTE_MODE; - if (SYN_CAP_EXTENDED(priv->capabilities)) - priv->mode |= SYN_BIT_W_MODE; - } - - if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture) + if (priv->disable_gesture) priv->mode |= SYN_BIT_DISABLE_GESTURE; - if (psmouse->rate >= 80) priv->mode |= SYN_BIT_HIGH_RATE; + if (SYN_CAP_EXTENDED(priv->capabilities)) + priv->mode |= SYN_BIT_W_MODE; if (synaptics_mode_cmd(psmouse, priv->mode)) return -1; diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 75516996db20..316f2c897101 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c @@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) * time before the ACK arrives. */ if (ps2_sendbyte(ps2dev, command & 0xff, - command == PS2_CMD_RESET_BAT ? 1000 : 200)) - goto out; + command == PS2_CMD_RESET_BAT ? 1000 : 200)) { + serio_pause_rx(ps2dev->serio); + goto out_reset_flags; + } - for (i = 0; i < send; i++) - if (ps2_sendbyte(ps2dev, param[i], 200)) - goto out; + for (i = 0; i < send; i++) { + if (ps2_sendbyte(ps2dev, param[i], 200)) { + serio_pause_rx(ps2dev->serio); + goto out_reset_flags; + } + } /* * The reset command takes a long time to execute. 
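Both elan get_product_id() variants above now assemble a 16-bit product id from the raw response buffer: little-endian on the I2C path (le16_to_cpup) and big-endian on the SMBus path (be16_to_cpup). A minimal user-space sketch of that byte-order handling follows; the byte values and buffer layout are invented for illustration and are not the device's real register format.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical raw response bytes as two different buses might return them. */
static const uint8_t i2c_resp[3]   = { 0x34, 0x12, 0x00 };	/* little-endian word */
static const uint8_t smbus_resp[3] = { 0x12, 0x34, 0x00 };	/* big-endian word */

/* Assemble a 16-bit value from two little-endian bytes (cf. le16_to_cpup). */
static uint16_t get16_le(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* Assemble a 16-bit value from two big-endian bytes (cf. be16_to_cpup). */
static uint16_t get16_be(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	/* Both paths should report the same product id, here 0x1234. */
	printf("i2c id = 0x%04x, smbus id = 0x%04x\n",
	       get16_le(i2c_resp), get16_be(smbus_resp));
	return 0;
}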
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) !(ps2dev->flags & PS2_FLAG_CMD), timeout); } + serio_pause_rx(ps2dev->serio); + if (param) for (i = 0; i < receive; i++) param[i] = ps2dev->cmdbuf[(receive - 1) - i]; if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) - goto out; + goto out_reset_flags; rc = 0; - out: - serio_pause_rx(ps2dev->serio); + out_reset_flags: ps2dev->flags = 0; serio_continue_rx(ps2dev->serio); diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c index 26b45936f9fd..1e8cd6f1fe9e 100644 --- a/drivers/input/serio/parkbd.c +++ b/drivers/input/serio/parkbd.c @@ -194,6 +194,7 @@ static int __init parkbd_init(void) parkbd_port = parkbd_allocate_serio(); if (!parkbd_port) { parport_release(parkbd_dev); + parport_unregister_device(parkbd_dev); return -ENOMEM; } diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 0f5f968592bd..04edc8f7122f 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -668,18 +668,22 @@ static int ads7846_no_filter(void *ads, int data_idx, int *val) static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m) { + int value; struct spi_transfer *t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list); if (ts->model == 7845) { - return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3; + value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1])); } else { /* * adjust: on-wire is a must-ignore bit, a BE12 value, then * padding; built from two 8 bit values written msb-first. */ - return be16_to_cpup((__be16 *)t->rx_buf) >> 3; + value = be16_to_cpup((__be16 *)t->rx_buf); } + + /* enforce ADC output is 12 bits width */ + return (value >> 3) & 0xfff; } static void ads7846_update_value(struct spi_message *m, int val) diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c index ff0b75813daa..8275267eac25 100644 --- a/drivers/input/touchscreen/imx6ul_tsc.c +++ b/drivers/input/touchscreen/imx6ul_tsc.c @@ -94,7 +94,7 @@ struct imx6ul_tsc { * TSC module need ADC to get the measure value. So * before config TSC, we should initialize ADC module. 
*/ -static void imx6ul_adc_init(struct imx6ul_tsc *tsc) +static int imx6ul_adc_init(struct imx6ul_tsc *tsc) { int adc_hc = 0; int adc_gc; @@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc) timeout = wait_for_completion_timeout (&tsc->completion, ADC_TIMEOUT); - if (timeout == 0) + if (timeout == 0) { dev_err(tsc->dev, "Timeout for adc calibration\n"); + return -ETIMEDOUT; + } adc_gs = readl(tsc->adc_regs + REG_ADC_GS); - if (adc_gs & ADC_CALF) + if (adc_gs & ADC_CALF) { dev_err(tsc->dev, "ADC calibration failed\n"); + return -EINVAL; + } /* TSC need the ADC work in hardware trigger */ adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); adc_cfg |= ADC_HARDWARE_TRIGGER; writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); + + return 0; } /* @@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc) writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); } -static void imx6ul_tsc_init(struct imx6ul_tsc *tsc) +static int imx6ul_tsc_init(struct imx6ul_tsc *tsc) { - imx6ul_adc_init(tsc); + int err; + + err = imx6ul_adc_init(tsc); + if (err) + return err; imx6ul_tsc_channel_config(tsc); imx6ul_tsc_set(tsc); + + return 0; } static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) @@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev) return err; } - imx6ul_tsc_init(tsc); - - return 0; + return imx6ul_tsc_init(tsc); } static void imx6ul_tsc_close(struct input_dev *input_dev) @@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) int tsc_irq; int adc_irq; - tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL); + tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL); if (!tsc) return -ENOMEM; @@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) if (!input_dev) return -ENOMEM; - input_dev->name = "iMX6UL TouchScreen Controller"; + input_dev->name = "iMX6UL Touchscreen Controller"; input_dev->id.bustype = BUS_HOST; input_dev->open = imx6ul_tsc_open; @@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) } adc_irq = platform_get_irq(pdev, 1); - if (adc_irq <= 0) { + if (adc_irq < 0) { dev_err(&pdev->dev, "no adc irq resource?\n"); return adc_irq; } @@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev) goto out; } - imx6ul_tsc_init(tsc); + retval = imx6ul_tsc_init(tsc); } out: diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 7cce87650fc8..1fafc9f57af6 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev) if (of_property_read_u32(np, "x-size", &pdata->x_size)) { dev_err(dev, "failed to get x-size property\n"); return NULL; - }; + } if (of_property_read_u32(np, "y-size", &pdata->y_size)) { dev_err(dev, "failed to get y-size property\n"); return NULL; - }; + } of_property_read_u32(np, "contact-threshold", &pdata->contact_threshold); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d9da766719c8..cbe6a890a93a 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -23,8 +23,7 @@ config IOMMU_IO_PGTABLE config IOMMU_IO_PGTABLE_LPAE bool "ARMv7/v8 Long Descriptor Format" select IOMMU_IO_PGTABLE - # SWIOTLB guarantees a dma_to_phys() implementation - depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB) + depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST) help Enable support for the ARM long descriptor pagetable format. 
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index f82060e778a2..08d2775887f7 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2006,6 +2006,15 @@ static void do_detach(struct iommu_dev_data *dev_data) { struct amd_iommu *iommu; + /* + * First check if the device is still attached. It might already + * be detached from its domain because the generic + * iommu_detach_group code detached it and we try again here in + * our alias handling. + */ + if (!dev_data->domain) + return; + iommu = amd_iommu_rlookup_table[dev_data->devid]; /* decrease reference counters */ diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 5ef347a13cb5..1b066e7d144d 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1256,6 +1256,9 @@ static int iommu_init_pci(struct amd_iommu *iommu) if (!iommu->dev) return -ENODEV; + /* Prevent binding other PCI device drivers to IOMMU devices */ + iommu->dev->match_driver = false; + pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, &iommu->cap); pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index dafaf59dc3b8..286e890e7d64 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -56,6 +56,7 @@ #define IDR0_TTF_SHIFT 2 #define IDR0_TTF_MASK 0x3 #define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT) +#define IDR0_TTF_AARCH32_64 (3 << IDR0_TTF_SHIFT) #define IDR0_S1P (1 << 1) #define IDR0_S2P (1 << 0) @@ -342,7 +343,8 @@ #define CMDQ_TLBI_0_VMID_SHIFT 32 #define CMDQ_TLBI_0_ASID_SHIFT 48 #define CMDQ_TLBI_1_LEAF (1UL << 0) -#define CMDQ_TLBI_1_ADDR_MASK ~0xfffUL +#define CMDQ_TLBI_1_VA_MASK ~0xfffUL +#define CMDQ_TLBI_1_IPA_MASK 0xfffffffff000UL #define CMDQ_PRI_0_SSID_SHIFT 12 #define CMDQ_PRI_0_SSID_MASK 0xfffffUL @@ -770,11 +772,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) break; case CMDQ_OP_TLBI_NH_VA: cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; - /* Fallthrough */ + cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; + cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; + break; case CMDQ_OP_TLBI_S2_IPA: cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT; cmd[1] |= ent->tlbi.leaf ? 
CMDQ_TLBI_1_LEAF : 0; - cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK; + cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; break; case CMDQ_OP_TLBI_NH_ASID: cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; @@ -2460,7 +2464,13 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) } /* We only support the AArch64 table format at present */ - if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) { + switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) { + case IDR0_TTF_AARCH32_64: + smmu->ias = 40; + /* Fallthrough */ + case IDR0_TTF_AARCH64: + break; + default: dev_err(smmu->dev, "AArch64 table format not supported!\n"); return -ENXIO; } @@ -2541,8 +2551,7 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) dev_warn(smmu->dev, "failed to set DMA mask for table walker\n"); - if (!smmu->ias) - smmu->ias = smmu->oas; + smmu->ias = max(smmu->ias, smmu->oas); dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", smmu->ias, smmu->oas, smmu->features); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 041bc1810a86..35365f046923 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2301,6 +2301,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, if (ret) { spin_unlock_irqrestore(&device_domain_lock, flags); + free_devinfo_mem(info); return NULL; } diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 73c07482f487..7df97777662d 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -202,9 +202,9 @@ typedef u64 arm_lpae_iopte; static bool selftest_running = false; -static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages) +static dma_addr_t __arm_lpae_dma_addr(void *pages) { - return phys_to_dma(dev, virt_to_phys(pages)); + return (dma_addr_t)virt_to_phys(pages); } static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, @@ -223,10 +223,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, goto out_free; /* * We depend on the IOMMU being able to work with any physical - * address directly, so if the DMA layer suggests it can't by - * giving us back some translation, that bodes very badly... + * address directly, so if the DMA layer suggests otherwise by + * translating or truncating them, that bodes very badly... 
*/ - if (dma != __arm_lpae_dma_addr(dev, pages)) + if (dma != virt_to_phys(pages)) goto out_unmap; } @@ -243,10 +243,8 @@ out_free: static void __arm_lpae_free_pages(void *pages, size_t size, struct io_pgtable_cfg *cfg) { - struct device *dev = cfg->iommu_dev; - if (!selftest_running) - dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages), + dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), size, DMA_TO_DEVICE); free_pages_exact(pages, size); } @@ -254,12 +252,11 @@ static void __arm_lpae_free_pages(void *pages, size_t size, static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, struct io_pgtable_cfg *cfg) { - struct device *dev = cfg->iommu_dev; - *ptep = pte; if (!selftest_running) - dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep), + dma_sync_single_for_device(cfg->iommu_dev, + __arm_lpae_dma_addr(ptep), sizeof(pte), DMA_TO_DEVICE); } @@ -629,6 +626,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) return NULL; + if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) { + dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n"); + return NULL; + } + data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return NULL; diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index cf351c637464..a7c8c9ffbafd 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) dev_alias->dev_id = alias; if (pdev != dev_alias->pdev) - dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); + dev_alias->count += its_pci_msi_vec_count(pdev); return 0; } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index ac7ae2b3cb83..25ceae9f7348 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) out: spin_unlock(&lpi_lock); + if (!bitmap) + *base = *nr_ids = 0; + return bitmap; } diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index 18accb0a79cc..c53a53f6efb6 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ b/drivers/isdn/hisax/isdnl2.c @@ -1247,7 +1247,7 @@ static void l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; - struct sk_buff *skb; + struct sk_buff *skb, *nskb; struct Layer2 *l2 = &st->l2; u_char header[MAX_HEADER_LEN]; int i, hdr_space_needed; @@ -1262,14 +1262,10 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) return; hdr_space_needed = l2headersize(l2, 0); - if (hdr_space_needed > skb_headroom(skb)) { - struct sk_buff *orig_skb = skb; - - skb = skb_realloc_headroom(skb, hdr_space_needed); - if (!skb) { - dev_kfree_skb(orig_skb); - return; - } + nskb = skb_realloc_headroom(skb, hdr_space_needed); + if (!nskb) { + skb_queue_head(&l2->i_queue, skb); + return; } spin_lock_irqsave(&l2->lock, flags); if (test_bit(FLG_MOD128, &l2->flag)) @@ -1282,7 +1278,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) p1); dev_kfree_skb(l2->windowar[p1]); } - l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC); + l2->windowar[p1] = skb; i = sethdraddr(&st->l2, header, CMD); @@ -1295,8 +1291,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) l2->vs = (l2->vs + 1) % 8; } spin_unlock_irqrestore(&l2->lock, flags); - memcpy(skb_push(skb, i), header, i); - st->l2.l2l1(st, PH_PULL | 
INDICATION, skb); + memcpy(skb_push(nskb, i), header, i); + st->l2.l2l1(st, PH_PULL | INDICATION, nskb); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { FsmDelTimer(&st->l2.t203, 13); diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index 8b1a66c6ca8a..e72b4e73cd61 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c @@ -235,7 +235,7 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline) int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) { - int len, incomplete = 0, found = 0; + int incomplete = 0, found = 0; char *dup, *tok, *name, *args; struct dsp_element_entry *entry, *n; struct dsp_pipeline_entry *pipeline_entry; @@ -247,17 +247,9 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (!list_empty(&pipeline->list)) _dsp_pipeline_destroy(pipeline); - if (!cfg) - return 0; - - len = strlen(cfg); - if (!len) - return 0; - - dup = kmalloc(len + 1, GFP_ATOMIC); + dup = kstrdup(cfg, GFP_ATOMIC); if (!dup) return 0; - strcpy(dup, cfg); while ((tok = strsep(&dup, "|"))) { if (!strlen(tok)) continue; diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c index 949cabb88f1c..5eb380a25903 100644 --- a/drivers/isdn/mISDN/layer2.c +++ b/drivers/isdn/mISDN/layer2.c @@ -1476,7 +1476,7 @@ static void l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; - struct sk_buff *skb, *nskb, *oskb; + struct sk_buff *skb, *nskb; u_char header[MAX_L2HEADER_LEN]; u_int i, p1; @@ -1486,48 +1486,34 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) skb = skb_dequeue(&l2->i_queue); if (!skb) return; - - if (test_bit(FLG_MOD128, &l2->flag)) - p1 = (l2->vs - l2->va) % 128; - else - p1 = (l2->vs - l2->va) % 8; - p1 = (p1 + l2->sow) % l2->window; - if (l2->windowar[p1]) { - printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n", - mISDNDevName4ch(&l2->ch), p1); - dev_kfree_skb(l2->windowar[p1]); - } - l2->windowar[p1] = skb; i = sethdraddr(l2, header, CMD); if (test_bit(FLG_MOD128, &l2->flag)) { header[i++] = l2->vs << 1; header[i++] = l2->vr << 1; + } else + header[i++] = (l2->vr << 5) | (l2->vs << 1); + nskb = skb_realloc_headroom(skb, i); + if (!nskb) { + printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n", + mISDNDevName4ch(&l2->ch), i); + skb_queue_head(&l2->i_queue, skb); + return; + } + if (test_bit(FLG_MOD128, &l2->flag)) { + p1 = (l2->vs - l2->va) % 128; l2->vs = (l2->vs + 1) % 128; } else { - header[i++] = (l2->vr << 5) | (l2->vs << 1); + p1 = (l2->vs - l2->va) % 8; l2->vs = (l2->vs + 1) % 8; } - - nskb = skb_clone(skb, GFP_ATOMIC); - p1 = skb_headroom(nskb); - if (p1 >= i) - memcpy(skb_push(nskb, i), header, i); - else { - printk(KERN_WARNING - "%s: L2 pull_iqueue skb header(%d/%d) too short\n", - mISDNDevName4ch(&l2->ch), i, p1); - oskb = nskb; - nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC); - if (!nskb) { - dev_kfree_skb(oskb); - printk(KERN_WARNING "%s: no skb mem in %s\n", - mISDNDevName4ch(&l2->ch), __func__); - return; - } - memcpy(skb_put(nskb, i), header, i); - memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len); - dev_kfree_skb(oskb); + p1 = (p1 + l2->sow) % l2->window; + if (l2->windowar[p1]) { + printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n", + mISDNDevName4ch(&l2->ch), p1); + dev_kfree_skb(l2->windowar[p1]); } + l2->windowar[p1] = skb; + memcpy(skb_push(nskb, i), header, i); l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb); 
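The reworked l2_pull_iqueue() above relies on skb_realloc_headroom() to guarantee room in front of the payload, puts the original skb back on the i_queue when that allocation fails instead of dropping the frame, and prepends the freshly built header with skb_push(). A rough user-space sketch of the headroom/push idea, with a hypothetical pkt_buf type standing in for sk_buff; the point of the headroom is that prepending only moves the data pointer, so the payload is never copied again.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical packet buffer: payload lives at `data`, with `headroom`
 * spare bytes in front of it inside the same allocation. */
struct pkt_buf {
	unsigned char *head;	/* start of the allocation */
	unsigned char *data;	/* start of the (header +) payload */
	size_t len;		/* bytes currently at data */
	size_t headroom;	/* free bytes between head and data */
};

static struct pkt_buf *pkt_alloc(size_t payload_len, size_t headroom)
{
	struct pkt_buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->head = malloc(headroom + payload_len);
	if (!b->head) {
		free(b);
		return NULL;
	}
	b->data = b->head + headroom;
	b->len = payload_len;
	b->headroom = headroom;
	return b;
}

/* Analogue of skb_push(): move data backwards into the headroom and
 * copy the header there, leaving the payload bytes where they are. */
static unsigned char *pkt_push(struct pkt_buf *b, const void *hdr, size_t hlen)
{
	if (hlen > b->headroom)
		return NULL;	/* caller should requeue/retry, not drop */
	b->data -= hlen;
	b->headroom -= hlen;
	b->len += hlen;
	memcpy(b->data, hdr, hlen);
	return b->data;
}

int main(void)
{
	const unsigned char hdr[2] = { 0x01, 0x7e };
	struct pkt_buf *b = pkt_alloc(4, 8);	/* 4-byte payload, 8 bytes headroom */

	if (!b)
		return 1;
	memcpy(b->data, "DATA", 4);
	if (!pkt_push(b, hdr, sizeof(hdr)))
		return 1;
	printf("frame is %zu bytes, first byte 0x%02x\n", b->len, b->data[0]);
	free(b->head);
	free(b);
	return 0;
}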
test_and_clear_bit(FLG_ACK_PEND, &l2->flag); if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) { diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c index de36237d7c6b..051645498b53 100644 --- a/drivers/mcb/mcb-pci.c +++ b/drivers/mcb/mcb-pci.c @@ -74,7 +74,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = -ENOTSUPP; dev_err(&pdev->dev, "IO mapped PCI devices are not supported\n"); - goto out_release; + goto out_iounmap; } pci_set_drvdata(pdev, priv); @@ -89,7 +89,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); if (ret < 0) - goto out_iounmap; + goto out_mcb_bus; num_cells = ret; dev_dbg(&pdev->dev, "Found %d cells\n", num_cells); @@ -98,6 +98,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +out_mcb_bus: + mcb_release_bus(priv->bus); out_iounmap: iounmap(priv->base); out_release: diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e51de52eeb94..48b5890c28e3 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) ret = bitmap_storage_alloc(&store, chunks, !bitmap->mddev->bitmap_info.external, - bitmap->cluster_slot); + mddev_is_clustered(bitmap->mddev) + ? bitmap->cluster_slot : 0); if (ret) goto err; diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c index 240c9f0e85e7..8a096456579b 100644 --- a/drivers/md/dm-cache-policy-cleaner.c +++ b/drivers/md/dm-cache-policy-cleaner.c @@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size, static struct dm_cache_policy_type wb_policy_type = { .name = "cleaner", .version = {1, 0, 0}, - .hint_size = 0, + .hint_size = 4, .owner = THIS_MODULE, .create = wb_create }; diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index ebaa4f803eec..192bb8beeb6b 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, return -EINVAL; } - tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL); + tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL); if (!tmp_store) { ti->error = "Exception store allocation failed"; return -ENOMEM; @@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, else if (persistent == 'N') type = get_type("N"); else { - ti->error = "Persistent flag is not P or N"; + ti->error = "Exception store type is not P or N"; r = -EINVAL; goto bad_type; } @@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, if (r) goto bad; - r = type->ctr(tmp_store, 0, NULL); + r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL)); if (r) { ti->error = "Exception store type constructor failed"; goto bad; diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index 0b2536247cf5..fae34e7a0b1e 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -42,8 +42,7 @@ struct dm_exception_store_type { const char *name; struct module *module; - int (*ctr) (struct dm_exception_store *store, - unsigned argc, char **argv); + int (*ctr) (struct dm_exception_store *store, char *options); /* * Destroys this object when you've finished with it. 
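With the exception store constructor above reduced to a single options string, the snapshot table argument <p|po|n> is split so that its first character selects the store type and any remainder (the "O" in "PO") is passed straight to the store's ctr; persistent_ctr then sets userspace_supports_overflow when it sees 'O'. A small stand-alone sketch of that parsing follows; store_cfg and parse_store_arg are invented names, not the target's real structures.

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the exception store being configured. */
struct store_cfg {
	char type;				/* 'P' persistent or 'N' transient */
	bool userspace_supports_overflow;
};

/* Parse a snapshot store token such as "P", "PO" or "N": the first
 * character picks the store type, anything after it is handed on as
 * options (here only 'O', "overflow supported", is understood). */
static int parse_store_arg(const char *arg, struct store_cfg *cfg)
{
	char type = toupper((unsigned char)arg[0]);
	const char *options = strlen(arg) > 1 ? &arg[1] : NULL;

	if (type != 'P' && type != 'N')
		return -1;		/* store type is not P or N */

	cfg->type = type;
	cfg->userspace_supports_overflow = false;

	if (options) {
		if (toupper((unsigned char)options[0]) == 'O')
			cfg->userspace_supports_overflow = true;
		else
			return -1;	/* unsupported store option */
	}
	return 0;
}

int main(void)
{
	static const char *args[] = { "P", "PO", "N", "PX" };
	struct store_cfg cfg;

	for (size_t i = 0; i < sizeof(args) / sizeof(args[0]); i++) {
		int r = parse_store_arg(args[i], &cfg);

		printf("%-2s -> %s\n", args[i],
		       r ? "invalid" :
		       (cfg.userspace_supports_overflow ?
			"persistent, overflow allowed" : "accepted"));
	}
	return 0;
}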
@@ -123,6 +122,8 @@ struct dm_exception_store { unsigned chunk_shift; void *context; + + bool userspace_supports_overflow; }; /* diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 97e165183e79..a0901214aef5 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) */ if (min_region_size > (1 << 13)) { /* If not a power of 2, make it the next power of 2 */ - if (min_region_size & (min_region_size - 1)) - region_size = 1 << fls(region_size); + region_size = roundup_pow_of_two(min_region_size); DMINFO("Choosing default region size of %lu sectors", region_size); } else { diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index bf71583296f7..117a05e40090 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -7,6 +7,7 @@ #include "dm-exception-store.h" +#include <linux/ctype.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/vmalloc.h> @@ -843,10 +844,10 @@ static void persistent_drop_snapshot(struct dm_exception_store *store) DMWARN("write header failed"); } -static int persistent_ctr(struct dm_exception_store *store, - unsigned argc, char **argv) +static int persistent_ctr(struct dm_exception_store *store, char *options) { struct pstore *ps; + int r; /* allocate the pstore */ ps = kzalloc(sizeof(*ps), GFP_KERNEL); @@ -868,14 +869,32 @@ static int persistent_ctr(struct dm_exception_store *store, ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0); if (!ps->metadata_wq) { - kfree(ps); DMERR("couldn't start header metadata update thread"); - return -ENOMEM; + r = -ENOMEM; + goto err_workqueue; + } + + if (options) { + char overflow = toupper(options[0]); + if (overflow == 'O') + store->userspace_supports_overflow = true; + else { + DMERR("Unsupported persistent store option: %s", options); + r = -EINVAL; + goto err_options; + } } store->context = ps; return 0; + +err_options: + destroy_workqueue(ps->metadata_wq); +err_workqueue: + kfree(ps); + + return r; } static unsigned persistent_status(struct dm_exception_store *store, @@ -888,7 +907,8 @@ static unsigned persistent_status(struct dm_exception_store *store, case STATUSTYPE_INFO: break; case STATUSTYPE_TABLE: - DMEMIT(" P %llu", (unsigned long long)store->chunk_size); + DMEMIT(" %s %llu", store->userspace_supports_overflow ? 
"PO" : "P", + (unsigned long long)store->chunk_size); } return sz; diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c index 1ce9a2586e41..9b7c8c8049d6 100644 --- a/drivers/md/dm-snap-transient.c +++ b/drivers/md/dm-snap-transient.c @@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store, *metadata_sectors = 0; } -static int transient_ctr(struct dm_exception_store *store, - unsigned argc, char **argv) +static int transient_ctr(struct dm_exception_store *store, char *options) { struct transient_c *tc; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c0bcd6516dfe..c06b74e91cd6 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1098,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s) } /* - * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> + * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size> */ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) { @@ -1302,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src, u.store_swap = snap_dest->store; snap_dest->store = snap_src->store; + snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow; snap_src->store = u.store_swap; snap_dest->store->snap = snap_dest; @@ -1739,8 +1740,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) pe = __find_pending_exception(s, pe, chunk); if (!pe) { - s->snapshot_overflowed = 1; - DMERR("Snapshot overflowed: Unable to allocate exception."); + if (s->store->userspace_supports_overflow) { + s->snapshot_overflowed = 1; + DMERR("Snapshot overflowed: Unable to allocate exception."); + } else + __invalidate_snapshot(s, -ENOMEM); r = -EIO; goto out_unlock; } @@ -2365,7 +2369,7 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 14, 0}, + .version = {1, 15, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, @@ -2379,7 +2383,7 @@ static struct target_type snapshot_target = { static struct target_type merge_target = { .name = dm_snapshot_merge_target_name, - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 6fcbfb063366..3897b90bd462 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3201,7 +3201,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) metadata_low_callback, pool); if (r) - goto out_free_pt; + goto out_flags_changed; pt->callbacks.congested_fn = pool_is_congested; dm_table_add_target_callbacks(ti->table, &pt->callbacks); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6264781dc69a..1b5c6047e4f1 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1001,6 +1001,7 @@ static void end_clone_bio(struct bio *clone) struct dm_rq_target_io *tio = info->tio; struct bio *bio = info->orig; unsigned int nr_bytes = info->orig->bi_iter.bi_size; + int error = clone->bi_error; bio_put(clone); @@ -1011,13 +1012,13 @@ static void end_clone_bio(struct bio *clone) * the remainder. */ return; - else if (bio->bi_error) { + else if (error) { /* * Don't notice the error to the upper layer yet. * The error handling decision is made by the target driver, * when the request is completed. 
*/ - tio->error = bio->bi_error; + tio->error = error; return; } @@ -2837,8 +2838,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait) might_sleep(); - map = dm_get_live_table(md, &srcu_idx); - spin_lock(&_minor_lock); idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); @@ -2852,14 +2851,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait) * do not race with internal suspend. */ mutex_lock(&md->suspend_lock); + map = dm_get_live_table(md, &srcu_idx); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } - mutex_unlock(&md->suspend_lock); - /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ dm_put_live_table(md, srcu_idx); + mutex_unlock(&md->suspend_lock); /* * Rare, but there may be I/O requests still going to complete, diff --git a/drivers/md/md.c b/drivers/md/md.c index 4f5ecbe94ccb..c702de18207a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) * which will now never happen */ wake_up_process(mddev->sync_thread->tsk); + if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) + return -EBUSY; mddev_unlock(mddev); wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); + wait_event(mddev->sb_wait, + !test_bit(MD_CHANGE_PENDING, &mddev->flags)); mddev_lock_nointr(mddev); mutex_lock(&mddev->open_mutex); @@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev) md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + clear_bit(MD_CHANGE_PENDING, &mddev->flags); goto unlock; } diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d222522c52e0..d132f06afdd1 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev) return 0; out_free_conf: - if (conf->pool) - mempool_destroy(conf->pool); + mempool_destroy(conf->pool); kfree(conf->multipaths); kfree(conf); mddev->private = NULL; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 63e619b2f44e..f8e5db0cb5aa 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev) struct md_rdev *rdev; bool discard_supported = false; - rdev_for_each(rdev, mddev) { - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) - discard_supported = true; - } blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); @@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev) blk_queue_io_opt(mddev->queue, (mddev->chunk_sectors << 9) * mddev->raid_disks); + rdev_for_each(rdev, mddev) { + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); + if (blk_queue_discard(bdev_get_queue(rdev->bdev))) + discard_supported = true; + } if (!discard_supported) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); else diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4517f06c41ba..ddd8a5f572aa 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) } if (bio && bio_data_dir(bio) == WRITE) { - if (bio->bi_iter.bi_sector >= - 
conf->mddev->curr_resync_completed) { + if (bio->bi_iter.bi_sector >= conf->next_resync) { if (conf->start_next_window == MaxSector) conf->start_next_window = conf->next_resync + @@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf) conf->r1buf_pool = NULL; spin_lock_irq(&conf->resync_lock); - conf->next_resync = 0; + conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE; conf->start_next_window = MaxSector; conf->current_window_requests += conf->next_window_requests; @@ -2383,8 +2382,8 @@ static void raid1d(struct md_thread *thread) } spin_unlock_irqrestore(&conf->device_lock, flags); while (!list_empty(&tmp)) { - r1_bio = list_first_entry(&conf->bio_end_io_list, - struct r1bio, retry_list); + r1_bio = list_first_entry(&tmp, struct r1bio, + retry_list); list_del(&r1_bio->retry_list); raid_end_bio_io(r1_bio); } @@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) abort: if (conf) { - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); @@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv) { struct r1conf *conf = priv; - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0fc33eb88855..9f69dc526f8c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2688,8 +2688,8 @@ static void raid10d(struct md_thread *thread) } spin_unlock_irqrestore(&conf->device_lock, flags); while (!list_empty(&tmp)) { - r10_bio = list_first_entry(&conf->bio_end_io_list, - struct r10bio, retry_list); + r10_bio = list_first_entry(&tmp, struct r10bio, + retry_list); list_del(&r10_bio->retry_list); raid_end_bio_io(r10_bio); } @@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", mdname(mddev)); if (conf) { - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf); @@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev) out_free_conf: md_unregister_thread(&mddev->thread); - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf); @@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv) { struct r10conf *conf = priv; - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf->mirrors_old); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 15ef2c641b2b..49bb8d3ff9be 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf) drop_one_stripe(conf)) ; - if (conf->slab_cache) - kmem_cache_destroy(conf->slab_cache); + kmem_cache_destroy(conf->slab_cache); conf->slab_cache = NULL; } @@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, spin_unlock_irq(&sh->stripe_lock); if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) wake_up(&conf->wait_for_overlap); + if (bi) + s->to_read--; while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = @@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf 
*conf, struct stripe_head *sh, */ clear_bit(R5_LOCKED, &sh->dev[i].flags); } + s->to_write = 0; + s->written = 0; if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, */ return 0; - for (i = 0; i < s->failed; i++) { + for (i = 0; i < s->failed && i < 2; i++) { if (fdev[i]->towrite && !test_bit(R5_UPTODATE, &fdev[i]->flags) && !test_bit(R5_OVERWRITE, &fdev[i]->flags)) @@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, sh->sector < sh->raid_conf->mddev->recovery_cp) /* reconstruct-write isn't being forced */ return 0; - for (i = 0; i < s->failed; i++) { + for (i = 0; i < s->failed && i < 2; i++) { if (s->failed_num[i] != sh->pd_idx && s->failed_num[i] != sh->qd_idx && !test_bit(R5_UPTODATE, &fdev[i]->flags) && diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h index f28cb28a62f8..2c7f8d7c0595 100644 --- a/drivers/mfd/intel-lpss.h +++ b/drivers/mfd/intel-lpss.h @@ -42,6 +42,8 @@ int intel_lpss_resume(struct device *dev); .thaw = intel_lpss_resume, \ .poweroff = intel_lpss_suspend, \ .restore = intel_lpss_resume, +#else +#define INTEL_LPSS_SLEEP_PM_OPS #endif #define INTEL_LPSS_RUNTIME_PM_OPS \ diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c index c52162ea3d0a..586098f1b233 100644 --- a/drivers/mfd/max77843.c +++ b/drivers/mfd/max77843.c @@ -80,7 +80,7 @@ static int max77843_chg_init(struct max77693_dev *max77843) if (!max77843->i2c_chg) { dev_err(&max77843->i2c->dev, "Cannot allocate I2C device for Charger\n"); - return PTR_ERR(max77843->i2c_chg); + return -ENODEV; } i2c_set_clientdata(max77843->i2c_chg, max77843); diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 8af12c884b04..103baf0e0c5b 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -105,6 +105,7 @@ EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs); void cxl_free_afu_irqs(struct cxl_context *ctx) { + afu_irq_name_free(ctx); cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); } EXPORT_SYMBOL_GPL(cxl_free_afu_irqs); diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index e762f85ee233..2faa1270d085 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -275,6 +275,9 @@ static void reclaim_ctx(struct rcu_head *rcu) if (ctx->kernelapi) kfree(ctx->mapping); + if (ctx->irq_bitmap) + kfree(ctx->irq_bitmap); + kfree(ctx); } diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 1c30ef77073d..0cfb9c129f27 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -677,6 +677,7 @@ int cxl_register_serr_irq(struct cxl_afu *afu); void cxl_release_serr_irq(struct cxl_afu *afu); int afu_register_irqs(struct cxl_context *ctx, u32 count); void afu_release_irqs(struct cxl_context *ctx, void *cookie); +void afu_irq_name_free(struct cxl_context *ctx); irqreturn_t cxl_slice_irq_err(int irq, void *data); int cxl_debugfs_init(void); diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index a30bf285b5bd..7ccd2998be92 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -120,9 +120,16 @@ int afu_release(struct inode *inode, struct file *file) __func__, ctx->pe); cxl_context_detach(ctx); - mutex_lock(&ctx->mapping_lock); - ctx->mapping = NULL; - mutex_unlock(&ctx->mapping_lock); + + /* + * Delete the context's mapping pointer, unless it's created by the + * kernel API, in which case leave it so it can be freed by 
reclaim_ctx() + */ + if (!ctx->kernelapi) { + mutex_lock(&ctx->mapping_lock); + ctx->mapping = NULL; + mutex_unlock(&ctx->mapping_lock); + } put_device(&ctx->afu->dev); diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index 583b42afeda2..09a406058c46 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c @@ -414,7 +414,7 @@ void cxl_release_psl_irq(struct cxl_afu *afu) kfree(afu->psl_irq_name); } -static void afu_irq_name_free(struct cxl_context *ctx) +void afu_irq_name_free(struct cxl_context *ctx) { struct cxl_irq_name *irq_name, *tmp; @@ -524,7 +524,5 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie) afu_irq_name_free(ctx); cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); - kfree(ctx->irq_bitmap); - ctx->irq_bitmap = NULL; ctx->irq_count = 0; } diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index b37f2e8004f5..d2e75c88f4d2 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -457,6 +457,7 @@ static int activate_afu_directed(struct cxl_afu *afu) dev_info(&afu->dev, "Activating AFU directed mode\n"); + afu->num_procs = afu->max_procs_virtualised; if (afu->spa == NULL) { if (cxl_alloc_spa(afu)) return -ENOMEM; @@ -468,7 +469,6 @@ static int activate_afu_directed(struct cxl_afu *afu) cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); afu->current_mode = CXL_MODE_DIRECTED; - afu->num_procs = afu->max_procs_virtualised; if ((rc = cxl_chardev_m_afu_add(afu))) return rc; diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index a5e977192b61..85761d7eb333 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1035,6 +1035,32 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev) return 0; } +/* + * Workaround a PCIe Host Bridge defect on some cards, that can cause + * malformed Transaction Layer Packet (TLP) errors to be erroneously + * reported. Mask this error in the Uncorrectable Error Mask Register. + * + * The upper nibble of the PSL revision is used to distinguish between + * different cards. The affected ones have it set to 0. 
+ */ +static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev) +{ + int aer; + u32 data; + + if (adapter->psl_rev & 0xf000) + return; + if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))) + return; + pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data); + if (data & PCI_ERR_UNC_MALF_TLP) + if (data & PCI_ERR_UNC_INTN) + return; + data |= PCI_ERR_UNC_MALF_TLP; + data |= PCI_ERR_UNC_INTN; + pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data); +} + static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev) { if (adapter->vsec_status & CXL_STATUS_SECOND_PORT) @@ -1134,6 +1160,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) if ((rc = cxl_vsec_looks_ok(adapter, dev))) return rc; + cxl_fixup_malformed_tlp(adapter, dev); + if ((rc = setup_cxl_bars(dev))) return rc; diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 8eec887c8f70..6d7c188fb65c 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1209,7 +1209,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) * after the host receives the enum_resp * message clients may be added or removed */ - if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS && + if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS || dev->hbm_state >= MEI_HBM_STOPPED) { dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 781e4db31767..7fb0753abe30 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -182,6 +182,7 @@ struct omap_hsmmc_host { struct clk *fclk; struct clk *dbclk; struct regulator *pbias; + bool pbias_enabled; void __iomem *base; int vqmmc_enabled; resource_size_t mapbase; @@ -328,20 +329,22 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, return ret; } - if (!regulator_is_enabled(host->pbias)) { + if (host->pbias_enabled == 0) { ret = regulator_enable(host->pbias); if (ret) { dev_err(host->dev, "pbias reg enable fail\n"); return ret; } + host->pbias_enabled = 1; } } else { - if (regulator_is_enabled(host->pbias)) { + if (host->pbias_enabled == 1) { ret = regulator_disable(host->pbias); if (ret) { dev_err(host->dev, "pbias reg disable fail\n"); return ret; } + host->pbias_enabled = 0; } } @@ -475,7 +478,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc"); if (IS_ERR(mmc->supply.vmmc)) { ret = PTR_ERR(mmc->supply.vmmc); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get vmmc regulator %ld\n", PTR_ERR(mmc->supply.vmmc)); @@ -490,7 +493,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux"); if (IS_ERR(mmc->supply.vqmmc)) { ret = PTR_ERR(mmc->supply.vqmmc); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", PTR_ERR(mmc->supply.vqmmc)); @@ -500,7 +503,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) host->pbias = devm_regulator_get_optional(host->dev, "pbias"); if (IS_ERR(host->pbias)) { ret = PTR_ERR(host->pbias); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get pbias regulator %ld\n", PTR_ERR(host->pbias)); @@ -2053,6 +2056,7 @@ static int omap_hsmmc_probe(struct 
platform_device *pdev) host->base = base + pdata->reg_offset; host->power_mode = MMC_POWER_OFF; host->next_data.cookie = 1; + host->pbias_enabled = 0; host->vqmmc_enabled = 0; ret = omap_hsmmc_gpio_init(mmc, host, pdata); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index d1556643a41d..a0f05de5409f 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -43,6 +43,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = { static const struct sdhci_pltfm_data soc_data_sama5d2 = { .ops = &sdhci_at91_sama5d2_ops, + .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST, }; static const struct of_device_id sdhci_at91_dt_match[] = { diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 946d37f94a31..f5edf9d3a18a 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev, struct sdhci_pxa *pxa = pltfm_host->priv; struct resource *res; + host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; host->quirks |= SDHCI_QUIRK_MISSING_CAPS; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "conf-sdio3"); @@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) uhs == MMC_TIMING_UHS_DDR50) { reg_val &= ~SDIO3_CONF_CLK_INV; reg_val |= SDIO3_CONF_SD_FB_CLK; + } else if (uhs == MMC_TIMING_MMC_HS) { + reg_val &= ~SDIO3_CONF_CLK_INV; + reg_val &= ~SDIO3_CONF_SD_FB_CLK; } else { reg_val |= SDIO3_CONF_CLK_INV; reg_val &= ~SDIO3_CONF_SD_FB_CLK; @@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { ret = armada_38x_quirks(pdev, host); if (ret < 0) - goto err_clk_get; + goto err_mbus_win; ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); if (ret < 0) goto err_mbus_win; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 64b7fdbd1a9c..fbc7efdddcb5 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1160,6 +1160,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST) + mdelay(1); if (clock == 0) return; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 7c02ff46c8ac..9d4aa31b683a 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -412,6 +412,11 @@ struct sdhci_host { #define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) /* Broken Clock divider zero in controller */ #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) +/* + * When internal clock is disabled, a delay is needed before modifying the + * SD clock frequency or enabling back the internal clock. 
+ */ +#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 2426db88db36..f04445b992f5 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom) oob_chunk_size); /* the last chunk */ - memcpy16_toio(&s[oob_chunk_size * sparebuf_size], + memcpy16_toio(&s[i * sparebuf_size], &d[i * oob_chunk_size], host->used_oobsize - i * oob_chunk_size); } diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index f97a58d6aae1..e7d333c162be 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -147,6 +147,10 @@ #define NFC_ECC_MODE GENMASK(15, 12) #define NFC_RANDOM_SEED GENMASK(30, 16) +/* NFC_USER_DATA helper macros */ +#define NFC_BUF_TO_USER_DATA(buf) ((buf)[0] | ((buf)[1] << 8) | \ + ((buf)[2] << 16) | ((buf)[3] << 24)) + #define NFC_DEFAULT_TIMEOUT_MS 1000 #define NFC_SRAM_SIZE 1024 @@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize; /* Fill OOB data in */ - if (oob_required) { - tmp = 0xffffffff; - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, - 4); - } else { - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, - chip->oob_poi + offset - mtd->writesize, - 4); - } + writel(NFC_BUF_TO_USER_DATA(chip->oob_poi + + layout->oobfree[i].offset), + nfc->regs + NFC_REG_USER_DATA_BASE); chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1); @@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd, offset += ecc->size; /* Fill OOB data in */ - if (oob_required) { - tmp = 0xffffffff; - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, - 4); - } else { - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob, - 4); - } + writel(NFC_BUF_TO_USER_DATA(oob), + nfc->regs + NFC_REG_USER_DATA_BASE); tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | (1 << 30); @@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) node); nand_release(&chip->mtd); sunxi_nand_ecc_cleanup(&chip->nand.ecc); + list_del(&chip->node); } } diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index b9ebd0d18a52..f184fb5bd110 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -298,8 +298,10 @@ config NLMON config NET_VRF tristate "Virtual Routing and Forwarding (Lite)" - depends on IP_MULTIPLE_TABLES && IPV6_MULTIPLE_TABLES + depends on IP_MULTIPLE_TABLES depends on NET_L3_MASTER_DEV + depends on IPV6 || IPV6=n + depends on IPV6_MULTIPLE_TABLES || IPV6=n ---help--- This option enables the support for mapping interfaces into VRF's. The support enables VRF devices. 
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index d7fdea11e694..20bfb9ba83ea 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -237,6 +237,8 @@ struct Outgoing { numsegs; /* number of segments */ }; +#define ARCNET_LED_NAME_SZ (IFNAMSIZ + 6) + struct arcnet_local { uint8_t config, /* current value of CONFIG register */ timeout, /* Extended timeout for COM20020 */ @@ -260,6 +262,13 @@ struct arcnet_local { /* On preemtive and SMB a lock is needed */ spinlock_t lock; + struct led_trigger *tx_led_trig; + char tx_led_trig_name[ARCNET_LED_NAME_SZ]; + struct led_trigger *recon_led_trig; + char recon_led_trig_name[ARCNET_LED_NAME_SZ]; + + struct timer_list timer; + /* * Buffer management: an ARCnet card has 4 x 512-byte buffers, each of * which can be used for either sending or receiving. The new dynamic @@ -309,6 +318,8 @@ struct arcnet_local { int (*reset)(struct net_device *dev, int really_reset); void (*open)(struct net_device *dev); void (*close)(struct net_device *dev); + void (*datatrigger) (struct net_device * dev, int enable); + void (*recontrigger) (struct net_device * dev, int enable); void (*copy_to_card)(struct net_device *dev, int bufnum, int offset, void *buf, int count); @@ -319,6 +330,16 @@ struct arcnet_local { void __iomem *mem_start; /* pointer to ioremap'ed MMIO */ }; +enum arcnet_led_event { + ARCNET_LED_EVENT_RECON, + ARCNET_LED_EVENT_OPEN, + ARCNET_LED_EVENT_STOP, + ARCNET_LED_EVENT_TX, +}; + +void arcnet_led_event(struct net_device *netdev, enum arcnet_led_event event); +void devm_arcnet_led_init(struct net_device *netdev, int index, int subid); + #if ARCNET_DEBUG_MAX & D_SKB void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); #else diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index e41dd36fe832..6ea963e3b89a 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -52,6 +52,8 @@ #include <linux/init.h> #include <linux/jiffies.h> +#include <linux/leds.h> + #include "arcdevice.h" #include "com9026.h" @@ -189,6 +191,71 @@ static void arcnet_dump_packet(struct net_device *dev, int bufnum, #endif +/* Trigger a LED event in response to a ARCNET device event */ +void arcnet_led_event(struct net_device *dev, enum arcnet_led_event event) +{ + struct arcnet_local *lp = netdev_priv(dev); + unsigned long led_delay = 350; + unsigned long tx_delay = 50; + + switch (event) { + case ARCNET_LED_EVENT_RECON: + led_trigger_blink_oneshot(lp->recon_led_trig, + &led_delay, &led_delay, 0); + break; + case ARCNET_LED_EVENT_OPEN: + led_trigger_event(lp->tx_led_trig, LED_OFF); + led_trigger_event(lp->recon_led_trig, LED_OFF); + break; + case ARCNET_LED_EVENT_STOP: + led_trigger_event(lp->tx_led_trig, LED_OFF); + led_trigger_event(lp->recon_led_trig, LED_OFF); + break; + case ARCNET_LED_EVENT_TX: + led_trigger_blink_oneshot(lp->tx_led_trig, + &tx_delay, &tx_delay, 0); + break; + } +} +EXPORT_SYMBOL_GPL(arcnet_led_event); + +static void arcnet_led_release(struct device *gendev, void *res) +{ + struct arcnet_local *lp = netdev_priv(to_net_dev(gendev)); + + led_trigger_unregister_simple(lp->tx_led_trig); + led_trigger_unregister_simple(lp->recon_led_trig); +} + +/* Register ARCNET LED triggers for a arcnet device + * + * This is normally called from a driver's probe function + */ +void devm_arcnet_led_init(struct net_device *netdev, int index, int subid) +{ + struct arcnet_local *lp = netdev_priv(netdev); + void *res; + + res = devres_alloc(arcnet_led_release, 
0, GFP_KERNEL); + if (!res) { + netdev_err(netdev, "cannot register LED triggers\n"); + return; + } + + snprintf(lp->tx_led_trig_name, sizeof(lp->tx_led_trig_name), + "arc%d-%d-tx", index, subid); + snprintf(lp->recon_led_trig_name, sizeof(lp->recon_led_trig_name), + "arc%d-%d-recon", index, subid); + + led_trigger_register_simple(lp->tx_led_trig_name, + &lp->tx_led_trig); + led_trigger_register_simple(lp->recon_led_trig_name, + &lp->recon_led_trig); + + devres_add(&netdev->dev, res); +} +EXPORT_SYMBOL_GPL(devm_arcnet_led_init); + /* Unregister a protocol driver from the arc_proto_map. Protocol drivers * are responsible for registering themselves, but the unregister routine * is pretty generic so we'll do it here. @@ -314,6 +381,16 @@ static void arcdev_setup(struct net_device *dev) dev->flags = IFF_BROADCAST; } +static void arcnet_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + + if (!netif_carrier_ok(dev)) { + netif_carrier_on(dev); + netdev_info(dev, "link up\n"); + } +} + struct net_device *alloc_arcdev(const char *name) { struct net_device *dev; @@ -325,6 +402,9 @@ struct net_device *alloc_arcdev(const char *name) struct arcnet_local *lp = netdev_priv(dev); spin_lock_init(&lp->lock); + init_timer(&lp->timer); + lp->timer.data = (unsigned long) dev; + lp->timer.function = arcnet_timer; } return dev; @@ -423,8 +503,11 @@ int arcnet_open(struct net_device *dev) lp->hw.intmask(dev, lp->intmask); arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__); + netif_carrier_off(dev); netif_start_queue(dev); + mod_timer(&lp->timer, jiffies + msecs_to_jiffies(1000)); + arcnet_led_event(dev, ARCNET_LED_EVENT_OPEN); return 0; out_module_put: @@ -438,7 +521,11 @@ int arcnet_close(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); + arcnet_led_event(dev, ARCNET_LED_EVENT_STOP); + del_timer_sync(&lp->timer); + netif_stop_queue(dev); + netif_carrier_off(dev); /* flush TX and disable RX */ lp->hw.intmask(dev, 0); @@ -515,7 +602,7 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb, struct ArcProto *proto; int txbuf; unsigned long flags; - int freeskb, retval; + int retval; arc_printk(D_DURING, dev, "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n", @@ -554,15 +641,13 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb, * the package later - forget about it now */ dev->stats.tx_bytes += skb->len; - freeskb = 1; + dev_kfree_skb(skb); } else { /* do it the 'split' way */ lp->outgoing.proto = proto; lp->outgoing.skb = skb; lp->outgoing.pkt = pkt; - freeskb = 0; - if (proto->continue_tx && proto->continue_tx(dev, txbuf)) { arc_printk(D_NORMAL, dev, @@ -574,7 +659,6 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb, lp->next_tx = txbuf; } else { retval = NETDEV_TX_BUSY; - freeskb = 0; } arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n", @@ -588,10 +672,9 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb, arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n", __FILE__, __LINE__, __func__, lp->hw.status(dev)); - spin_unlock_irqrestore(&lp->lock, flags); - if (freeskb) - dev_kfree_skb(skb); + arcnet_led_event(dev, ARCNET_LED_EVENT_TX); + spin_unlock_irqrestore(&lp->lock, flags); return retval; /* no need to try again */ } EXPORT_SYMBOL(arcnet_send_packet); @@ -843,6 +926,13 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) arc_printk(D_RECON, dev, "Network reconfiguration detected (status=%Xh)\n", status); + if (netif_carrier_ok(dev)) { + netif_carrier_off(dev); + netdev_info(dev, "link down\n"); + } + 
mod_timer(&lp->timer, jiffies + msecs_to_jiffies(1000)); + + arcnet_led_event(dev, ARCNET_LED_EVENT_RECON); /* MYRECON bit is at bit 7 of diagstatus */ if (diagstatus & 0x80) arc_printk(D_RECON, dev, "Put out that recon myself\n"); @@ -893,6 +983,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) lp->num_recons = lp->network_down = 0; arc_printk(D_DURING, dev, "not recon: clearing counters anyway.\n"); + netif_carrier_on(dev); } if (didsomething) diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index a12bf83be750..239de38fbd6a 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -41,6 +41,7 @@ #include <linux/pci.h> #include <linux/list.h> #include <linux/io.h> +#include <linux/leds.h> #include "arcdevice.h" #include "com20020.h" @@ -62,12 +63,43 @@ module_param(clockp, int, 0); module_param(clockm, int, 0); MODULE_LICENSE("GPL"); +static void led_tx_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct com20020_dev *card; + struct com20020_priv *priv; + struct com20020_pci_card_info *ci; + + card = container_of(led_cdev, struct com20020_dev, tx_led); + + priv = card->pci_priv; + ci = priv->ci; + + outb(!!value, priv->misc + ci->leds[card->index].green); +} + +static void led_recon_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct com20020_dev *card; + struct com20020_priv *priv; + struct com20020_pci_card_info *ci; + + card = container_of(led_cdev, struct com20020_dev, recon_led); + + priv = card->pci_priv; + ci = priv->ci; + + outb(!!value, priv->misc + ci->leds[card->index].red); +} + static void com20020pci_remove(struct pci_dev *pdev); static int com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct com20020_pci_card_info *ci; + struct com20020_pci_channel_map *mm; struct net_device *dev; struct arcnet_local *lp; struct com20020_priv *priv; @@ -84,9 +116,22 @@ static int com20020pci_probe(struct pci_dev *pdev, ci = (struct com20020_pci_card_info *)id->driver_data; priv->ci = ci; + mm = &ci->misc_map; INIT_LIST_HEAD(&priv->list_dev); + if (mm->size) { + ioaddr = pci_resource_start(pdev, mm->bar) + mm->offset; + r = devm_request_region(&pdev->dev, ioaddr, mm->size, + "com20020-pci"); + if (!r) { + pr_err("IO region %xh-%xh already allocated.\n", + ioaddr, ioaddr + mm->size - 1); + return -EBUSY; + } + priv->misc = ioaddr; + } + for (i = 0; i < ci->devcount; i++) { struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i]; struct com20020_dev *card; @@ -96,6 +141,7 @@ static int com20020pci_probe(struct pci_dev *pdev, ret = -ENOMEM; goto out_port; } + dev->dev_port = i; dev->netdev_ops = &com20020_netdev_ops; @@ -131,6 +177,13 @@ static int com20020pci_probe(struct pci_dev *pdev, lp->timeout = timeout; lp->hw.owner = THIS_MODULE; + /* Get the dev_id from the PLX rotary coder */ + if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) + dev->dev_id = 0xc; + dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4; + + snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); + if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) { pr_err("IO address %Xh is empty!\n", ioaddr); ret = -EIO; @@ -148,14 +201,41 @@ static int com20020pci_probe(struct pci_dev *pdev, card->index = i; card->pci_priv = priv; + card->tx_led.brightness_set = led_tx_set; + card->tx_led.default_trigger = devm_kasprintf(&pdev->dev, + GFP_KERNEL, "arc%d-%d-tx", + dev->dev_id, i); + card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "pci:green:tx:%d-%d", + 
dev->dev_id, i); + + card->tx_led.dev = &dev->dev; + card->recon_led.brightness_set = led_recon_set; + card->recon_led.default_trigger = devm_kasprintf(&pdev->dev, + GFP_KERNEL, "arc%d-%d-recon", + dev->dev_id, i); + card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "pci:red:recon:%d-%d", + dev->dev_id, i); + card->recon_led.dev = &dev->dev; card->dev = dev; + ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); + if (ret) + goto out_port; + + ret = devm_led_classdev_register(&pdev->dev, &card->recon_led); + if (ret) + goto out_port; + dev_set_drvdata(&dev->dev, card); ret = com20020_found(dev, IRQF_SHARED); if (ret) goto out_port; + devm_arcnet_led_init(dev, dev->dev_id, i); + list_add(&card->list, &priv->list_dev); } @@ -234,6 +314,18 @@ static struct com20020_pci_card_info card_info_eae_arc1 = { .size = 0x08, }, }, + .misc_map = { + .bar = 2, + .offset = 0x10, + .size = 0x04, + }, + .leds = { + { + .green = 0x0, + .red = 0x1, + }, + }, + .rotary = 0x0, .flags = ARC_CAN_10MBIT, }; @@ -251,6 +343,21 @@ static struct com20020_pci_card_info card_info_eae_ma1 = { .size = 0x08, } }, + .misc_map = { + .bar = 2, + .offset = 0x10, + .size = 0x04, + }, + .leds = { + { + .green = 0x0, + .red = 0x1, + }, { + .green = 0x2, + .red = 0x3, + }, + }, + .rotary = 0x0, .flags = ARC_CAN_10MBIT, }; diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c index c82f323a8c2b..13d9ad4b3f5c 100644 --- a/drivers/net/arcnet/com20020.c +++ b/drivers/net/arcnet/com20020.c @@ -118,7 +118,7 @@ int com20020_check(struct net_device *dev) arcnet_outb(STARTIOcmd, ioaddr, COM20020_REG_W_COMMAND); } - lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE; + lp->config = (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE; /* set node ID to 0x42 (but transmitter is disabled, so it's okay) */ arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG); arcnet_outb(0x42, ioaddr, COM20020_REG_W_XREG); @@ -131,11 +131,6 @@ int com20020_check(struct net_device *dev) } arc_printk(D_INIT_REASONS, dev, "status after reset: %X\n", status); - /* Enable TX */ - lp->config |= TXENcfg; - arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG); - arcnet_outb(arcnet_inb(ioaddr, 8), ioaddr, COM20020_REG_W_XREG); - arcnet_outb(CFLAGScmd | RESETclear | CONFIGclear, ioaddr, COM20020_REG_W_COMMAND); status = arcnet_inb(ioaddr, COM20020_REG_R_STATUS); @@ -169,9 +164,33 @@ static int com20020_set_hwaddr(struct net_device *dev, void *addr) return 0; } +static int com20020_netdev_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct arcnet_local *lp = netdev_priv(dev); + + lp->config |= TXENcfg; + arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG); + + return arcnet_open(dev); +} + +static int com20020_netdev_close(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct arcnet_local *lp = netdev_priv(dev); + + arcnet_close(dev); + + /* disable transmitter */ + lp->config &= ~TXENcfg; + arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG); + return 0; +} + const struct net_device_ops com20020_netdev_ops = { - .ndo_open = arcnet_open, - .ndo_stop = arcnet_close, + .ndo_open = com20020_netdev_open, + .ndo_stop = com20020_netdev_close, .ndo_start_xmit = arcnet_send_packet, .ndo_tx_timeout = arcnet_timeout, .ndo_set_mac_address = com20020_set_hwaddr, @@ -215,7 +234,7 @@ int com20020_found(struct net_device *dev, int shared) arcnet_outb(STARTIOcmd, ioaddr, COM20020_REG_W_COMMAND); } - lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE; + 
lp->config = (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE; /* Default 0x38 + register: Node ID */ arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG); arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG); @@ -274,7 +293,7 @@ static int com20020_reset(struct net_device *dev, int really_reset) dev->name, arcnet_inb(ioaddr, COM20020_REG_R_STATUS)); arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__); - lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2); + lp->config |= (lp->timeout << 3) | (lp->backplane << 2); /* power-up defaults */ arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG); arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__); diff --git a/drivers/net/arcnet/com20020.h b/drivers/net/arcnet/com20020.h index 22a460f39fb9..0bcc5d0a6903 100644 --- a/drivers/net/arcnet/com20020.h +++ b/drivers/net/arcnet/com20020.h @@ -26,6 +26,7 @@ */ #ifndef __COM20020_H #define __COM20020_H +#include <linux/leds.h> int com20020_check(struct net_device *dev); int com20020_found(struct net_device *dev, int shared); @@ -36,6 +37,11 @@ extern const struct net_device_ops com20020_netdev_ops; #define PLX_PCI_MAX_CARDS 2 +struct ledoffsets { + int green; + int red; +}; + struct com20020_pci_channel_map { u32 bar; u32 offset; @@ -47,6 +53,10 @@ struct com20020_pci_card_info { int devcount; struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS]; + struct com20020_pci_channel_map misc_map; + + struct ledoffsets leds[PLX_PCI_MAX_CARDS]; + int rotary; unsigned int flags; }; @@ -54,12 +64,16 @@ struct com20020_pci_card_info { struct com20020_priv { struct com20020_pci_card_info *ci; struct list_head list_dev; + resource_size_t misc; }; struct com20020_dev { struct list_head list; struct net_device *dev; + struct led_classdev tx_led; + struct led_classdev recon_led; + struct com20020_priv *pci_priv; int index; }; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 90f2615428c0..d0f23cd6e236 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1071,7 +1071,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev, NETIF_F_HIGHDMA | NETIF_F_LRO) #define BOND_ENC_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\ - NETIF_F_TSO) + NETIF_F_ALL_TSO) static void bond_compute_features(struct bonding *bond) { diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 945c0955a967..8b3275d7792a 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -8,15 +8,6 @@ * Public License ("GPL") version 2 as distributed in the 'COPYING' * file from the main directory of the linux kernel source. 
* - * - * Your platform definition file should specify something like: - * - * static struct at91_can_data ek_can_data = { - * transceiver_switch = sam9263ek_transceiver_switch, - * }; - * - * at91_add_device_can(&ek_can_data); - * */ #include <linux/clk.h> @@ -33,7 +24,6 @@ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> -#include <linux/platform_data/atmel.h> #include <linux/can/dev.h> #include <linux/can/error.h> @@ -324,15 +314,6 @@ static inline u32 at91_can_id_to_reg_mid(canid_t can_id) return reg_mid; } -/* - * Swtich transceiver on or off - */ -static void at91_transceiver_switch(const struct at91_priv *priv, int on) -{ - if (priv->pdata && priv->pdata->transceiver_switch) - priv->pdata->transceiver_switch(on); -} - static void at91_setup_mailboxes(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); @@ -416,7 +397,6 @@ static void at91_chip_start(struct net_device *dev) at91_set_bittiming(dev); at91_setup_mailboxes(dev); - at91_transceiver_switch(priv, 1); /* enable chip */ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) @@ -444,7 +424,6 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state) reg_mr = at91_read(priv, AT91_MR); at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN); - at91_transceiver_switch(priv, 0); priv->can.state = state; } diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index e5fac368068a..131026fbc2d7 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -87,6 +87,7 @@ static const struct pci_device_id peak_pci_tbl[] = { {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,}, #ifdef CONFIG_CAN_PEAK_PCIEC {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 10d8497635e8..d9a42c646783 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -601,7 +601,7 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) stats->tx_errors++; if (likely(skb)) { cf->can_id |= CAN_ERR_LOSTARB; - cf->data[0] = (alc & 0x1f) >> 8; + cf->data[0] = (alc >> 8) & 0x1f; } } @@ -854,4 +854,4 @@ module_platform_driver(sun4i_can_driver); MODULE_AUTHOR("Peter Chen <xingkongcp@gmail.com>"); MODULE_AUTHOR("Gerhard Bertelsmann <info@gerhard-bertelsmann.de>"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION(DRV_NAME "CAN driver for Allwinner SoCs (A10/A20)"); +MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20)"); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 9d56515f4c4d..6f946fedbb77 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -21,10 +21,13 @@ #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> +#include <linux/of_net.h> #include <net/dsa.h> #include <linux/ethtool.h> #include <linux/if_bridge.h> #include <linux/brcmphy.h> +#include <linux/etherdevice.h> +#include <net/switchdev.h> #include "bcm_sf2.h" #include "bcm_sf2_regs.h" @@ -264,6 +267,50 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) } } +static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv, + int port) +{ + unsigned int off; + + switch (port) 
{ + case 7: + off = P7_IRQ_OFF; + break; + case 0: + /* Port 0 interrupts are located on the first bank */ + intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF)); + return; + default: + off = P_IRQ_OFF(port); + break; + } + + intrl2_1_mask_clear(priv, P_IRQ_MASK(off)); +} + +static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv, + int port) +{ + unsigned int off; + + switch (port) { + case 7: + off = P7_IRQ_OFF; + break; + case 0: + /* Port 0 interrupts are located on the first bank */ + intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF)); + intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR); + return; + default: + off = P_IRQ_OFF(port); + break; + } + + intrl2_1_mask_set(priv, P_IRQ_MASK(off)); + intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR); +} + static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, struct phy_device *phy) { @@ -280,7 +327,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, core_writel(priv, 0, CORE_G_PCTL_PORT(port)); /* Re-enable the GPHY and re-apply workarounds */ - if (port == 0 && priv->hw_params.num_gphy == 1) { + if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) { bcm_sf2_gphy_enable_set(ds, true); if (phy) { /* if phy_stop() has been called before, phy @@ -297,9 +344,9 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, } } - /* Enable port 7 interrupts to get notified */ - if (port == 7) - intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF)); + /* Enable MoCA port interrupts to get notified */ + if (port == priv->moca_port) + bcm_sf2_port_intr_enable(priv, port); /* Set this port, and only this one to be in the default VLAN, * if member of a bridge, restore its membership prior to @@ -329,12 +376,10 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, if (priv->wol_ports_mask & (1 << port)) return; - if (port == 7) { - intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF)); - intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR); - } + if (port == priv->moca_port) + bcm_sf2_port_intr_disable(priv, port); - if (port == 0 && priv->hw_params.num_gphy == 1) + if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) bcm_sf2_gphy_enable_set(ds, false); if (dsa_is_cpu_port(ds, port)) @@ -555,6 +600,236 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, return 0; } +/* Address Resolution Logic routines */ +static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 10; + u32 reg; + + do { + reg = core_readl(priv, CORE_ARLA_RWCTL); + if (!(reg & ARL_STRTDN)) + return 0; + + usleep_range(1000, 2000); + } while (timeout--); + + return -ETIMEDOUT; +} + +static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op) +{ + u32 cmd; + + if (op > ARL_RW) + return -EINVAL; + + cmd = core_readl(priv, CORE_ARLA_RWCTL); + cmd &= ~IVL_SVL_SELECT; + cmd |= ARL_STRTDN; + if (op) + cmd |= ARL_RW; + else + cmd &= ~ARL_RW; + core_writel(priv, cmd, CORE_ARLA_RWCTL); + + return bcm_sf2_arl_op_wait(priv); +} + +static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac, + u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx, + bool is_valid) +{ + unsigned int i; + int ret; + + ret = bcm_sf2_arl_op_wait(priv); + if (ret) + return ret; + + /* Read the 4 bins */ + for (i = 0; i < 4; i++) { + u64 mac_vid; + u32 fwd_entry; + + mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i)); + fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i)); + bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry); + + if (ent->is_valid && is_valid) { 
+ *idx = i; + return 0; + } + + /* This is the MAC we just deleted */ + if (!is_valid && (mac_vid & mac)) + return 0; + } + + return -ENOENT; +} + +static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port, + const unsigned char *addr, u16 vid, bool is_valid) +{ + struct bcm_sf2_arl_entry ent; + u32 fwd_entry; + u64 mac, mac_vid = 0; + u8 idx = 0; + int ret; + + /* Convert the array into a 64-bit MAC */ + mac = bcm_sf2_mac_to_u64(addr); + + /* Perform a read for the given MAC and VID */ + core_writeq(priv, mac, CORE_ARLA_MAC); + core_writel(priv, vid, CORE_ARLA_VID); + + /* Issue a read operation for this MAC */ + ret = bcm_sf2_arl_rw_op(priv, 1); + if (ret) + return ret; + + ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid); + /* If this is a read, just finish now */ + if (op) + return ret; + + /* We could not find a matching MAC, so reset to a new entry */ + if (ret) { + fwd_entry = 0; + idx = 0; + } + + memset(&ent, 0, sizeof(ent)); + ent.port = port; + ent.is_valid = is_valid; + ent.vid = vid; + ent.is_static = true; + memcpy(ent.mac, addr, ETH_ALEN); + bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent); + + core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx)); + core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx)); + + ret = bcm_sf2_arl_rw_op(priv, 0); + if (ret) + return ret; + + /* Re-read the entry to check */ + return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid); +} + +static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + /* We do not need to do anything specific here yet */ + return 0; +} + +static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + + return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true); +} + +static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + + return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); +} + +static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv) +{ + unsigned timeout = 1000; + u32 reg; + + do { + reg = core_readl(priv, CORE_ARLA_SRCH_CTL); + if (!(reg & ARLA_SRCH_STDN)) + return 0; + + if (reg & ARLA_SRCH_VLID) + return 0; + + usleep_range(1000, 2000); + } while (timeout--); + + return -ETIMEDOUT; +} + +static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx, + struct bcm_sf2_arl_entry *ent) +{ + u64 mac_vid; + u32 fwd_entry; + + mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx)); + fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx)); + bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry); +} + +static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port, + const struct bcm_sf2_arl_entry *ent, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + if (!ent->is_valid) + return 0; + + if (port != ent->port) + return 0; + + ether_addr_copy(fdb->addr, ent->mac); + fdb->vid = ent->vid; + fdb->ndm_state = ent->is_static ? 
NUD_NOARP : NUD_REACHABLE; + + return cb(&fdb->obj); +} + +static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct net_device *dev = ds->ports[port]; + struct bcm_sf2_arl_entry results[2]; + unsigned int count = 0; + int ret; + + /* Start search operation */ + core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL); + + do { + ret = bcm_sf2_arl_search_wait(priv); + if (ret) + return ret; + + /* Read both entries, then return their values back */ + bcm_sf2_arl_search_rd(priv, 0, &results[0]); + ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb); + if (ret) + return ret; + + bcm_sf2_arl_search_rd(priv, 1, &results[1]); + ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb); + if (ret) + return ret; + + if (!results[0].is_valid && !results[1].is_valid) + break; + + } while (count++ < CORE_ARLA_NUM_ENTRIES); + + return 0; +} + static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) { struct bcm_sf2_priv *priv = dev_id; @@ -615,6 +890,42 @@ static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv) intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); } +static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, + struct device_node *dn) +{ + struct device_node *port; + const char *phy_mode_str; + int mode; + unsigned int port_num; + int ret; + + priv->moca_port = -1; + + for_each_available_child_of_node(dn, port) { + if (of_property_read_u32(port, "reg", &port_num)) + continue; + + /* Internal PHYs get assigned a specific 'phy-mode' property + * value: "internal" to help flag them before MDIO probing + * has completed, since they might be turned off at that + * time + */ + mode = of_get_phy_mode(port); + if (mode < 0) { + ret = of_property_read_string(port, "phy-mode", + &phy_mode_str); + if (ret < 0) + continue; + + if (!strcasecmp(phy_mode_str, "internal")) + priv->int_phy_mask |= 1 << port_num; + } + + if (mode == PHY_INTERFACE_MODE_MOCA) + priv->moca_port = port_num; + } +} + static int bcm_sf2_sw_setup(struct dsa_switch *ds) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; @@ -633,6 +944,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) * level */ dn = ds->pd->of_node->parent; + bcm_sf2_identify_ports(priv, ds->pd->of_node); priv->irq0 = irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); @@ -913,7 +1225,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, status->link = 0; - /* Port 7 is special as we do not get link status from CORE_LNKSTS, + /* MoCA port is special as we do not get link status from CORE_LNKSTS, * which means that we need to force the link at the port override * level to get the data to flow. We do use what the interrupt handler * did determine before. @@ -921,7 +1233,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, * For the other ports, we just force the link status, since this is * a fixed PHY device. 
*/ - if (port == 7) { + if (port == priv->moca_port) { status->link = priv->port_sts[port].link; /* For MoCA interfaces, also force a link down notification * since some version of the user-space daemon (mocad) use @@ -1076,6 +1388,10 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = { .port_join_bridge = bcm_sf2_sw_br_join, .port_leave_bridge = bcm_sf2_sw_br_leave, .port_stp_update = bcm_sf2_sw_br_set_stp_state, + .port_fdb_prepare = bcm_sf2_sw_fdb_prepare, + .port_fdb_add = bcm_sf2_sw_fdb_add, + .port_fdb_del = bcm_sf2_sw_fdb_del, + .port_fdb_dump = bcm_sf2_sw_fdb_dump, }; static int __init bcm_sf2_init(void) diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 789d7b7737da..6bba1c98d764 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -19,6 +19,8 @@ #include <linux/mutex.h> #include <linux/mii.h> #include <linux/ethtool.h> +#include <linux/types.h> +#include <linux/bitops.h> #include <net/dsa.h> @@ -50,6 +52,60 @@ struct bcm_sf2_port_status { u32 vlan_ctl_mask; }; +struct bcm_sf2_arl_entry { + u8 port; + u8 mac[ETH_ALEN]; + u16 vid; + u8 is_valid:1; + u8 is_age:1; + u8 is_static:1; +}; + +static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst) +{ + unsigned int i; + + for (i = 0; i < ETH_ALEN; i++) + dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff; +} + +static inline u64 bcm_sf2_mac_to_u64(const u8 *src) +{ + unsigned int i; + u64 dst = 0; + + for (i = 0; i < ETH_ALEN; i++) + dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i); + + return dst; +} + +static inline void bcm_sf2_arl_to_entry(struct bcm_sf2_arl_entry *ent, + u64 mac_vid, u32 fwd_entry) +{ + memset(ent, 0, sizeof(*ent)); + ent->port = fwd_entry & PORTID_MASK; + ent->is_valid = !!(fwd_entry & ARL_VALID); + ent->is_age = !!(fwd_entry & ARL_AGE); + ent->is_static = !!(fwd_entry & ARL_STATIC); + bcm_sf2_mac_from_u64(mac_vid, ent->mac); + ent->vid = mac_vid >> VID_SHIFT; +} + +static inline void bcm_sf2_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, + const struct bcm_sf2_arl_entry *ent) +{ + *mac_vid = bcm_sf2_mac_to_u64(ent->mac); + *mac_vid |= (u64)(ent->vid & VID_MASK) << VID_SHIFT; + *fwd_entry = ent->port & PORTID_MASK; + if (ent->is_valid) + *fwd_entry |= ARL_VALID; + if (ent->is_static) + *fwd_entry |= ARL_STATIC; + if (ent->is_age) + *fwd_entry |= ARL_AGE; +} + struct bcm_sf2_priv { /* Base registers, keep those in order with BCM_SF2_REGS_NAME */ void __iomem *core; @@ -78,6 +134,12 @@ struct bcm_sf2_priv { /* Mask of ports enabled for Wake-on-LAN */ u32 wol_ports_mask; + + /* MoCA port location */ + int moca_port; + + /* Bitmask of ports having an integrated PHY */ + unsigned int int_phy_mask; }; struct bcm_sf2_hw_stats { diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index fa4e6e78c9ea..97780d43b5c0 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -231,6 +231,49 @@ #define CORE_BRCM_HDR_RX_DIS 0x0980 #define CORE_BRCM_HDR_TX_DIS 0x0988 +#define CORE_ARLA_NUM_ENTRIES 1024 + +#define CORE_ARLA_RWCTL 0x1400 +#define ARL_RW (1 << 0) +#define IVL_SVL_SELECT (1 << 6) +#define ARL_STRTDN (1 << 7) + +#define CORE_ARLA_MAC 0x1408 +#define CORE_ARLA_VID 0x1420 +#define ARLA_VIDTAB_INDX_MASK 0x1fff + +#define CORE_ARLA_MACVID0 0x1440 +#define MAC_MASK 0xffffffffff +#define VID_SHIFT 48 +#define VID_MASK 0xfff + +#define CORE_ARLA_FWD_ENTRY0 0x1460 +#define PORTID_MASK 0x1ff +#define ARL_CON_SHIFT 9 +#define ARL_CON_MASK 0x3 +#define ARL_PRI_SHIFT 11 +#define ARL_PRI_MASK 0x7 +#define ARL_AGE (1 << 14) +#define 
ARL_STATIC (1 << 15) +#define ARL_VALID (1 << 16) + +#define CORE_ARLA_MACVID_ENTRY(x) (CORE_ARLA_MACVID0 + ((x) * 0x40)) +#define CORE_ARLA_FWD_ENTRY(x) (CORE_ARLA_FWD_ENTRY0 + ((x) * 0x40)) + +#define CORE_ARLA_SRCH_CTL 0x1540 +#define ARLA_SRCH_VLID (1 << 0) +#define IVL_SVL_SELECT (1 << 6) +#define ARLA_SRCH_STDN (1 << 7) + +#define CORE_ARLA_SRCH_ADR 0x1544 +#define ARLA_SRCH_ADR_VALID (1 << 15) + +#define CORE_ARLA_SRCH_RSLT_0_MACVID 0x1580 +#define CORE_ARLA_SRCH_RSLT_0 0x15a0 + +#define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40)) +#define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40)) + #define CORE_MEM_PSM_VDD_CTRL 0x2380 #define P_TXQ_PSM_VDD_SHIFT 2 #define P_TXQ_PSM_VDD_MASK 0x3 diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index c29aebe1e62b..9093577755f6 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -26,7 +26,7 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg) if (bus == NULL) return -EINVAL; - return mdiobus_read(bus, ds->pd->sw_addr + addr, reg); + return mdiobus_read_nested(bus, ds->pd->sw_addr + addr, reg); } #define REG_READ(addr, reg) \ @@ -47,7 +47,7 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) if (bus == NULL) return -EINVAL; - return mdiobus_write(bus, ds->pd->sw_addr + addr, reg, val); + return mdiobus_write_nested(bus, ds->pd->sw_addr + addr, reg, val); } #define REG_WRITE(addr, reg, val) \ diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index ca3330aec740..2c8eb6f76ebe 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -113,8 +113,6 @@ struct dsa_switch_driver mv88e6171_switch_driver = { #endif .get_regs_len = mv88e6xxx_get_regs_len, .get_regs = mv88e6xxx_get_regs, - .port_join_bridge = mv88e6xxx_join_bridge, - .port_leave_bridge = mv88e6xxx_leave_bridge, .port_stp_update = mv88e6xxx_port_stp_update, .port_pvid_get = mv88e6xxx_port_pvid_get, .port_pvid_set = mv88e6xxx_port_pvid_set, @@ -124,7 +122,7 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, .port_fdb_add = mv88e6xxx_port_fdb_add, .port_fdb_del = mv88e6xxx_port_fdb_del, - .port_fdb_getnext = mv88e6xxx_port_fdb_getnext, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, }; MODULE_ALIAS("platform:mv88e6171"); diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 078a358c1b83..cbf4dd8721a6 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -340,8 +340,6 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .set_eeprom = mv88e6352_set_eeprom, .get_regs_len = mv88e6xxx_get_regs_len, .get_regs = mv88e6xxx_get_regs, - .port_join_bridge = mv88e6xxx_join_bridge, - .port_leave_bridge = mv88e6xxx_leave_bridge, .port_stp_update = mv88e6xxx_port_stp_update, .port_pvid_get = mv88e6xxx_port_pvid_get, .port_pvid_set = mv88e6xxx_port_pvid_set, @@ -351,7 +349,7 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, .port_fdb_add = mv88e6xxx_port_fdb_add, .port_fdb_del = mv88e6xxx_port_fdb_del, - .port_fdb_getnext = mv88e6xxx_port_fdb_getnext, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, }; MODULE_ALIAS("platform:mv88e6172"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 87b405e4f9f6..b1b14f519d8b 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -11,7 +11,6 @@ * (at your option) any later version. 
*/ -#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -21,39 +20,10 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/phy.h> -#include <linux/seq_file.h> #include <net/dsa.h> #include <net/switchdev.h> #include "mv88e6xxx.h" -/* MDIO bus access can be nested in the case of PHYs connected to the - * internal MDIO bus of the switch, which is accessed via MDIO bus of - * the Ethernet interface. Avoid lockdep false positives by using - * mutex_lock_nested(). - */ -static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) -{ - int ret; - - mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING); - ret = bus->read(bus, addr, regnum); - mutex_unlock(&bus->mdio_lock); - - return ret; -} - -static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, - u16 val) -{ - int ret; - - mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING); - ret = bus->write(bus, addr, regnum, val); - mutex_unlock(&bus->mdio_lock); - - return ret; -} - /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will * use all 32 SMI bus addresses on its SMI bus, and all switch registers * will be directly accessible on some {device address,register address} @@ -68,7 +38,7 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr) int i; for (i = 0; i < 16; i++) { - ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD); + ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD); if (ret < 0) return ret; @@ -84,7 +54,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg) int ret; if (sw_addr == 0) - return mv88e6xxx_mdiobus_read(bus, addr, reg); + return mdiobus_read_nested(bus, addr, reg); /* Wait for the bus to become free. */ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); @@ -92,8 +62,8 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg) return ret; /* Transmit the read command. */ - ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD, - SMI_CMD_OP_22_READ | (addr << 5) | reg); + ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD, + SMI_CMD_OP_22_READ | (addr << 5) | reg); if (ret < 0) return ret; @@ -103,7 +73,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg) return ret; /* Read the data. */ - ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA); + ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA); if (ret < 0) return ret; @@ -147,7 +117,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, int ret; if (sw_addr == 0) - return mv88e6xxx_mdiobus_write(bus, addr, reg, val); + return mdiobus_write_nested(bus, addr, reg, val); /* Wait for the bus to become free. */ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); @@ -155,13 +125,13 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, return ret; /* Transmit the data to write. */ - ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val); + ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val); if (ret < 0) return ret; /* Transmit the write command. 
*/ - ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD, - SMI_CMD_OP_22_WRITE | (addr << 5) | reg); + ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD, + SMI_CMD_OP_22_WRITE | (addr << 5) | reg); if (ret < 0) return ret; @@ -876,13 +846,6 @@ static int _mv88e6xxx_atu_wait(struct dsa_switch *ds) GLOBAL_ATU_OP_BUSY); } -/* Must be called with SMI lock held */ -static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds) -{ - return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC, - GLOBAL2_SCRATCH_BUSY); -} - /* Must be called with SMI mutex held */ static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum) @@ -1046,11 +1009,6 @@ static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too) return _mv88e6xxx_atu_flush_move(ds, &entry, static_too); } -static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid) -{ - return _mv88e6xxx_atu_flush(ds, fid, false); -} - static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port, int to_port, bool static_too) { @@ -1112,130 +1070,21 @@ abort: return ret; } -/* Must be called with smi lock held */ -static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - u8 fid = ps->fid[port]; - u16 reg = fid << 12; - - if (dsa_is_cpu_port(ds, port)) - reg |= ds->phys_port_mask; - else - reg |= (ps->bridge_mask[fid] | - (1 << dsa_upstream_port(ds))) & ~(1 << port); - - return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg); -} - -/* Must be called with smi lock held */ -static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int port; - u32 mask; - int ret; - - mask = ds->phys_port_mask; - while (mask) { - port = __ffs(mask); - mask &= ~(1 << port); - if (ps->fid[port] != fid) - continue; - - ret = _mv88e6xxx_update_port_config(ds, port); - if (ret) - return ret; - } - - return _mv88e6xxx_flush_fid(ds, fid); -} - -/* Bridge handling functions */ - -int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret = 0; - u32 nmask; - int fid; - - /* If the bridge group is not empty, join that group. - * Otherwise create a new group. - */ - fid = ps->fid[port]; - nmask = br_port_mask & ~(1 << port); - if (nmask) - fid = ps->fid[__ffs(nmask)]; - - nmask = ps->bridge_mask[fid] | (1 << port); - if (nmask != br_port_mask) { - netdev_err(ds->ports[port], - "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n", - fid, br_port_mask, nmask); - return -EINVAL; - } - - mutex_lock(&ps->smi_mutex); - - ps->bridge_mask[fid] = br_port_mask; - - if (fid != ps->fid[port]) { - clear_bit(ps->fid[port], ps->fid_bitmap); - ps->fid[port] = fid; - ret = _mv88e6xxx_update_bridge_config(ds, fid); - } - - mutex_unlock(&ps->smi_mutex); - - return ret; -} - -int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) +static int _mv88e6xxx_port_vlan_map_set(struct dsa_switch *ds, int port, + u16 output_ports) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - u8 fid, newfid; - int ret; - - fid = ps->fid[port]; - - if (ps->bridge_mask[fid] != br_port_mask) { - netdev_err(ds->ports[port], - "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n", - fid, br_port_mask, ps->bridge_mask[fid]); - return -EINVAL; - } - - /* If the port was the last port of a bridge, we are done. 
- * Otherwise assign a new fid to the port, and fix up - * the bridge configuration. - */ - if (br_port_mask == (1 << port)) - return 0; - - mutex_lock(&ps->smi_mutex); - - newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1); - if (unlikely(newfid > ps->num_ports)) { - netdev_err(ds->ports[port], "all first %d FIDs are used\n", - ps->num_ports); - ret = -ENOSPC; - goto unlock; - } - - ps->fid[port] = newfid; - set_bit(newfid, ps->fid_bitmap); - ps->bridge_mask[fid] &= ~(1 << port); - ps->bridge_mask[newfid] = 1 << port; + const u16 mask = (1 << ps->num_ports) - 1; + int reg; - ret = _mv88e6xxx_update_bridge_config(ds, fid); - if (!ret) - ret = _mv88e6xxx_update_bridge_config(ds, newfid); + reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN); + if (reg < 0) + return reg; -unlock: - mutex_unlock(&ps->smi_mutex); + reg &= ~mask; + reg |= output_ports & mask; - return ret; + return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg); } int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state) @@ -1373,7 +1222,13 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds, return 0; } -static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid, +static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid) +{ + return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, + vid & GLOBAL_VTU_VID_MASK); +} + +static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, struct mv88e6xxx_vtu_stu_entry *entry) { struct mv88e6xxx_vtu_stu_entry next = { 0 }; @@ -1383,11 +1238,6 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid, if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, - vid & GLOBAL_VTU_VID_MASK); - if (ret < 0) - return ret; - ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT); if (ret < 0) return ret; @@ -1547,6 +1397,7 @@ static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid, struct mv88e6xxx_vtu_stu_entry vlan = { .valid = true, .vid = vid, + .fid = vid, /* We use one FID per VLAN */ }; int i; @@ -1580,22 +1431,10 @@ static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid, return err; } - /* Non-bridged ports and bridge groups use FIDs from 1 to - * num_ports; VLANs use FIDs from num_ports+1 to 4095. 
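
The VTU rework in this hunk splits the VID write out of _mv88e6xxx_vtu_getnext(): callers program GLOBAL_VTU_VID through _mv88e6xxx_vtu_vid_write() and then issue GET_NEXT, which returns the first valid entry whose VID is greater than the value written, so programming vid - 1 looks up vid exactly. Below is a toy model of that lookup order, assuming (as the new comments say) one FID per VLAN; it models only the iteration, not the real register protocol:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vtu_entry { unsigned short vid; bool valid; };

/* pretend VTU contents in the order the hardware walks them */
static const struct vtu_entry vtu[] = {
	{ .vid = 1,    .valid = true },
	{ .vid = 100,  .valid = true },
	{ .vid = 4095, .valid = false },
};

static const struct vtu_entry *vtu_getnext(unsigned short written_vid)
{
	size_t i;

	for (i = 0; i < sizeof(vtu) / sizeof(vtu[0]); i++)
		if (vtu[i].vid > written_vid)
			return &vtu[i];
	return NULL;
}

int main(void)
{
	/* to find VID 100 exactly: write 99, then GetNext */
	const struct vtu_entry *e = vtu_getnext(100 - 1);

	if (e && e->valid && e->vid == 100)
		printf("VLAN %u present, FID %u (one FID per VLAN)\n",
		       (unsigned)e->vid, (unsigned)e->vid);
	return 0;
}
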
- */ - vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, - ps->num_ports + 1); - if (unlikely(vlan.fid == VLAN_N_VID)) { - pr_err("no more FID available for VLAN %d\n", vid); - return -ENOSPC; - } - /* Clear all MAC addresses from the new database */ err = _mv88e6xxx_atu_flush(ds, vlan.fid, true); if (err) return err; - - set_bit(vlan.fid, ps->fid_bitmap); } *entry = vlan; @@ -1610,7 +1449,12 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid, int err; mutex_lock(&ps->smi_mutex); - err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan); + + err = _mv88e6xxx_vtu_vid_write(ds, vid - 1); + if (err) + goto unlock; + + err = _mv88e6xxx_vtu_getnext(ds, &vlan); if (err) goto unlock; @@ -1635,12 +1479,15 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct mv88e6xxx_vtu_stu_entry vlan; - bool keep = false; int i, err; mutex_lock(&ps->smi_mutex); - err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan); + err = _mv88e6xxx_vtu_vid_write(ds, vid - 1); + if (err) + goto unlock; + + err = _mv88e6xxx_vtu_getnext(ds, &vlan); if (err) goto unlock; @@ -1653,57 +1500,28 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid) vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; /* keep the VLAN unless all ports are excluded */ + vlan.valid = false; for (i = 0; i < ps->num_ports; ++i) { if (dsa_is_cpu_port(ds, i)) continue; if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) { - keep = true; + vlan.valid = true; break; } } - vlan.valid = keep; err = _mv88e6xxx_vtu_loadpurge(ds, &vlan); if (err) goto unlock; err = _mv88e6xxx_atu_remove(ds, vlan.fid, port, false); - if (err) - goto unlock; - - if (!keep) - clear_bit(vlan.fid, ps->fid_bitmap); - unlock: mutex_unlock(&ps->smi_mutex); return err; } -static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid, - struct mv88e6xxx_vtu_stu_entry *entry) -{ - int err; - - do { - if (vid == 4095) - return -ENOENT; - - err = _mv88e6xxx_vtu_getnext(ds, vid, entry); - if (err) - return err; - - if (!entry->valid) - return -ENOENT; - - vid = entry->vid; - } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED && - entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED); - - return 0; -} - int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid, unsigned long *ports, unsigned long *untagged) { @@ -1716,7 +1534,12 @@ int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid, return -ENOENT; mutex_lock(&ps->smi_mutex); - err = _mv88e6xxx_vtu_getnext(ds, *vid, &next); + err = _mv88e6xxx_vtu_vid_write(ds, *vid); + if (err) + goto unlock; + + err = _mv88e6xxx_vtu_getnext(ds, &next); +unlock: mutex_unlock(&ps->smi_mutex); if (err) @@ -1801,37 +1624,13 @@ static int _mv88e6xxx_atu_load(struct dsa_switch *ds, return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB); } -static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - struct mv88e6xxx_vtu_stu_entry vlan; - int err; - - if (vid == 0) - return ps->fid[port]; - - err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan); - if (err) - return err; - - if (vlan.vid == vid) - return vlan.fid; - - return -ENOENT; -} - static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid, u8 state) { struct mv88e6xxx_atu_entry entry = { 0 }; - int ret; - - ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid); - if (ret < 0) - return ret; - entry.fid = ret; + entry.fid = 
vid; /* We use one FID per VLAN */ entry.state = state; ether_addr_copy(entry.mac, addr); if (state != GLOBAL_ATU_DATA_STATE_UNUSED) { @@ -1846,6 +1645,10 @@ int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans) { + /* We don't use per-port FDB */ + if (fdb->vid == 0) + return -EOPNOTSUPP; + /* We don't need any dynamic resource from the kernel (yet), * so skip the prepare phase. */ @@ -1884,7 +1687,6 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, } static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, - const unsigned char *addr, struct mv88e6xxx_atu_entry *entry) { struct mv88e6xxx_atu_entry next = { 0 }; @@ -1896,10 +1698,6 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, if (ret < 0) return ret; - ret = _mv88e6xxx_atu_mac_write(ds, addr); - if (ret < 0) - return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid); if (ret < 0) return ret; @@ -1937,51 +1735,69 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, return 0; } -/* get next entry for port */ -int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, - unsigned char *addr, u16 *vid, bool *is_static) +int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - struct mv88e6xxx_atu_entry next; - u16 fid; - int ret; + struct mv88e6xxx_vtu_stu_entry vlan = { + .vid = GLOBAL_VTU_VID_MASK, /* all ones */ + }; + int err; mutex_lock(&ps->smi_mutex); - ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid); - if (ret < 0) + err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid); + if (err) goto unlock; - fid = ret; do { - if (is_broadcast_ether_addr(addr)) { - struct mv88e6xxx_vtu_stu_entry vtu; + struct mv88e6xxx_atu_entry addr = { + .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + }; - ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu); - if (ret < 0) - goto unlock; + err = _mv88e6xxx_vtu_getnext(ds, &vlan); + if (err) + goto unlock; - *vid = vtu.vid; - fid = vtu.fid; - } + if (!vlan.valid) + break; - ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next); - if (ret < 0) + err = _mv88e6xxx_atu_mac_write(ds, addr.mac); + if (err) goto unlock; - ether_addr_copy(addr, next.mac); + do { + err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr); + if (err) + goto unlock; - if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED) - continue; - } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0); + if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED) + break; + + if (!addr.trunk && addr.portv_trunkid & BIT(port)) { + bool is_static = addr.state == + (is_multicast_ether_addr(addr.mac) ? + GLOBAL_ATU_DATA_STATE_MC_STATIC : + GLOBAL_ATU_DATA_STATE_UC_STATIC); + + fdb->vid = vlan.vid; + ether_addr_copy(fdb->addr, addr.mac); + fdb->ndm_state = is_static ? NUD_NOARP : + NUD_REACHABLE; + + err = cb(&fdb->obj); + if (err) + goto unlock; + } + } while (!is_broadcast_ether_addr(addr.mac)); + + } while (vlan.vid < GLOBAL_VTU_VID_MASK); - *is_static = next.state == (is_multicast_ether_addr(addr) ? 
- GLOBAL_ATU_DATA_STATE_MC_STATIC : - GLOBAL_ATU_DATA_STATE_UC_STATIC); unlock: mutex_unlock(&ps->smi_mutex); - return ret; + return err; } static void mv88e6xxx_bridge_work(struct work_struct *work) @@ -2003,7 +1819,7 @@ static void mv88e6xxx_bridge_work(struct work_struct *work) static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret, fid; + int ret; u16 reg; mutex_lock(&ps->smi_mutex); @@ -2129,7 +1945,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_CONTROL_2_FORWARD_UNKNOWN; } - reg |= PORT_CONTROL_2_8021Q_FALLBACK; + reg |= PORT_CONTROL_2_8021Q_SECURE; if (reg) { ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), @@ -2222,19 +2038,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (ret) goto abort; - /* Port based VLAN map: give each port its own address - * database, allow the CPU port to talk to each of the 'real' - * ports, and allow each of the 'real' ports to only talk to - * the upstream port. + /* Port based VLAN map: do not give each port its own address + * database, and allow every port to egress frames on all other ports. */ - fid = port + 1; - ps->fid[port] = fid; - set_bit(fid, ps->fid_bitmap); - - if (!dsa_is_cpu_port(ds, port)) - ps->bridge_mask[fid] = 1 << port; - - ret = _mv88e6xxx_update_port_config(ds, port); + reg = BIT(ps->num_ports) - 1; /* all ports */ + ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port); if (ret) goto abort; @@ -2262,273 +2070,9 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds) return 0; } -static int mv88e6xxx_regs_show(struct seq_file *s, void *p) -{ - struct dsa_switch *ds = s->private; - - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int reg, port; - - seq_puts(s, " GLOBAL GLOBAL2 "); - for (port = 0 ; port < ps->num_ports; port++) - seq_printf(s, " %2d ", port); - seq_puts(s, "\n"); - - for (reg = 0; reg < 32; reg++) { - seq_printf(s, "%2x: ", reg); - seq_printf(s, " %4x %4x ", - mv88e6xxx_reg_read(ds, REG_GLOBAL, reg), - mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg)); - - for (port = 0 ; port < ps->num_ports; port++) - seq_printf(s, "%4x ", - mv88e6xxx_reg_read(ds, REG_PORT(port), reg)); - seq_puts(s, "\n"); - } - - return 0; -} - -static int mv88e6xxx_regs_open(struct inode *inode, struct file *file) -{ - return single_open(file, mv88e6xxx_regs_show, inode->i_private); -} - -static const struct file_operations mv88e6xxx_regs_fops = { - .open = mv88e6xxx_regs_open, - .read = seq_read, - .llseek = no_llseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static void mv88e6xxx_atu_show_header(struct seq_file *s) -{ - seq_puts(s, "DB T/P Vec State Addr\n"); -} - -static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum, - unsigned char *addr, int data) -{ - bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK); - int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >> - GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT); - int state = data & GLOBAL_ATU_DATA_STATE_MASK; - - seq_printf(s, "%03x %5s %10pb %x %pM\n", - dbnum, (trunk ? 
"Trunk" : "Port"), &portvec, state, addr); -} - -static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds, - int dbnum) -{ - unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - unsigned char addr[6]; - int ret, data, state; - - ret = _mv88e6xxx_atu_mac_write(ds, bcast); - if (ret < 0) - return ret; - - do { - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, - dbnum); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB); - if (ret < 0) - return ret; - - data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA); - if (data < 0) - return data; - - state = data & GLOBAL_ATU_DATA_STATE_MASK; - if (state == GLOBAL_ATU_DATA_STATE_UNUSED) - break; - ret = _mv88e6xxx_atu_mac_read(ds, addr); - if (ret < 0) - return ret; - mv88e6xxx_atu_show_entry(s, dbnum, addr, data); - } while (state != GLOBAL_ATU_DATA_STATE_UNUSED); - - return 0; -} - -static int mv88e6xxx_atu_show(struct seq_file *s, void *p) -{ - struct dsa_switch *ds = s->private; - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int dbnum; - - mv88e6xxx_atu_show_header(s); - - for (dbnum = 0; dbnum < 255; dbnum++) { - mutex_lock(&ps->smi_mutex); - mv88e6xxx_atu_show_db(s, ds, dbnum); - mutex_unlock(&ps->smi_mutex); - } - - return 0; -} - -static int mv88e6xxx_atu_open(struct inode *inode, struct file *file) -{ - return single_open(file, mv88e6xxx_atu_show, inode->i_private); -} - -static const struct file_operations mv88e6xxx_atu_fops = { - .open = mv88e6xxx_atu_open, - .read = seq_read, - .llseek = no_llseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static void mv88e6xxx_stats_show_header(struct seq_file *s, - struct mv88e6xxx_priv_state *ps) -{ - int port; - - seq_puts(s, " Statistic "); - for (port = 0 ; port < ps->num_ports; port++) - seq_printf(s, "Port %2d ", port); - seq_puts(s, "\n"); -} - -static int mv88e6xxx_stats_show(struct seq_file *s, void *p) -{ - struct dsa_switch *ds = s->private; - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats; - int port, stat, max_stats; - uint64_t value; - - if (have_sw_in_discards(ds)) - max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats); - else - max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3; - - mv88e6xxx_stats_show_header(s, ps); - - mutex_lock(&ps->smi_mutex); - - for (stat = 0; stat < max_stats; stat++) { - seq_printf(s, "%19s: ", stats[stat].string); - for (port = 0 ; port < ps->num_ports; port++) { - _mv88e6xxx_stats_snapshot(ds, port); - value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats, - port); - seq_printf(s, "%8llu ", value); - } - seq_puts(s, "\n"); - } - mutex_unlock(&ps->smi_mutex); - - return 0; -} - -static int mv88e6xxx_stats_open(struct inode *inode, struct file *file) -{ - return single_open(file, mv88e6xxx_stats_show, inode->i_private); -} - -static const struct file_operations mv88e6xxx_stats_fops = { - .open = mv88e6xxx_stats_open, - .read = seq_read, - .llseek = no_llseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static int mv88e6xxx_device_map_show(struct seq_file *s, void *p) -{ - struct dsa_switch *ds = s->private; - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int target, ret; - - seq_puts(s, "Target Port\n"); - - mutex_lock(&ps->smi_mutex); - for (target = 0; target < 32; target++) { - ret = _mv88e6xxx_reg_write( - ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, - target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT); - if (ret < 0) - goto out; - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2, - 
GLOBAL2_DEVICE_MAPPING); - seq_printf(s, " %2d %2d\n", target, - ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK); - } -out: - mutex_unlock(&ps->smi_mutex); - - return 0; -} - -static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file) -{ - return single_open(file, mv88e6xxx_device_map_show, inode->i_private); -} - -static const struct file_operations mv88e6xxx_device_map_fops = { - .open = mv88e6xxx_device_map_open, - .read = seq_read, - .llseek = no_llseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static int mv88e6xxx_scratch_show(struct seq_file *s, void *p) -{ - struct dsa_switch *ds = s->private; - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int reg, ret; - - seq_puts(s, "Register Value\n"); - - mutex_lock(&ps->smi_mutex); - for (reg = 0; reg < 0x80; reg++) { - ret = _mv88e6xxx_reg_write( - ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC, - reg << GLOBAL2_SCRATCH_REGISTER_SHIFT); - if (ret < 0) - goto out; - - ret = _mv88e6xxx_scratch_wait(ds); - if (ret < 0) - goto out; - - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2, - GLOBAL2_SCRATCH_MISC); - seq_printf(s, " %2x %2x\n", reg, - ret & GLOBAL2_SCRATCH_VALUE_MASK); - } -out: - mutex_unlock(&ps->smi_mutex); - - return 0; -} - -static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file) -{ - return single_open(file, mv88e6xxx_scratch_show, inode->i_private); -} - -static const struct file_operations mv88e6xxx_scratch_fops = { - .open = mv88e6xxx_scratch_open, - .read = seq_read, - .llseek = no_llseek, - .release = single_release, - .owner = THIS_MODULE, -}; - int mv88e6xxx_setup_common(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - char *name; mutex_init(&ps->smi_mutex); @@ -2536,24 +2080,6 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds) INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); - name = kasprintf(GFP_KERNEL, "dsa%d", ds->index); - ps->dbgfs = debugfs_create_dir(name, NULL); - kfree(name); - - debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds, - &mv88e6xxx_regs_fops); - - debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds, - &mv88e6xxx_atu_fops); - - debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds, - &mv88e6xxx_stats_fops); - - debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds, - &mv88e6xxx_device_map_fops); - - debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds, - &mv88e6xxx_scratch_fops); return 0; } diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 8325c11b9be2..6f9ed5d45012 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -402,18 +402,10 @@ struct mv88e6xxx_priv_state { int id; /* switch product id */ int num_ports; /* number of switch ports */ - /* hw bridging */ - - DECLARE_BITMAP(fid_bitmap, VLAN_N_VID); /* FIDs 1 to 4095 available */ - u16 fid[DSA_MAX_PORTS]; /* per (non-bridged) port FID */ - u16 bridge_mask[DSA_MAX_PORTS]; /* br groups (indexed by FID) */ - unsigned long port_state_update_mask; u8 port_state[DSA_MAX_PORTS]; struct work_struct bridge_work; - - struct dentry *dbgfs; }; struct mv88e6xxx_hw_stat { @@ -464,8 +456,6 @@ int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum, int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e); -int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask); -int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask); int 
mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state); int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid); int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 vid); @@ -482,8 +472,9 @@ int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, struct switchdev_trans *trans); int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb); -int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, - unsigned char *addr, u16 *vid, bool *is_static); +int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)); int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg); int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, int reg, int val); diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 815eb94990f5..69fc8409a973 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -147,8 +147,12 @@ static void dummy_setup(struct net_device *dev) dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; - dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO; + dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST; + dev->features |= NETIF_F_ALL_TSO | NETIF_F_UFO; dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; + dev->features |= NETIF_F_GSO_ENCAP_ALL; + dev->hw_features |= dev->features; + dev->hw_enc_features |= dev->features; eth_hw_addr_random(dev); } diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index ae89de7deb13..20bf55dbd76f 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1141,8 +1141,6 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in strlcpy(info->version, "revision: 1.0", sizeof(info->version)); strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info)); strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); - info->eedump_len = 0; - info->regdump_len = sizeof(struct greth_regs); } static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 48ce83e443c2..8d50314ac3eb 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -847,21 +847,25 @@ static int emac_probe(struct platform_device *pdev) if (ndev->irq == -ENXIO) { netdev_err(ndev, "No irq resource\n"); ret = ndev->irq; - goto out; + goto out_iounmap; } db->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(db->clk)) { ret = PTR_ERR(db->clk); - goto out; + goto out_iounmap; } - clk_prepare_enable(db->clk); + ret = clk_prepare_enable(db->clk); + if (ret) { + dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret); + goto out_iounmap; + } ret = sunxi_sram_claim(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Error couldn't map SRAM to device\n"); - goto out; + goto out_clk_disable_unprepare; } db->phy_node = of_parse_phandle(np, "phy", 0); @@ -910,6 +914,10 @@ static int emac_probe(struct platform_device *pdev) out_release_sram: sunxi_sram_release(&pdev->dev); +out_clk_disable_unprepare: + clk_disable_unprepare(db->clk); +out_iounmap: + iounmap(db->membase); out: dev_err(db->dev, "not found (%d).\n", ret); @@ -921,8 +929,12 @@ out: static int emac_remove(struct platform_device *pdev) { struct net_device *ndev = 
platform_get_drvdata(pdev); + struct emac_board_info *db = netdev_priv(ndev); unregister_netdev(ndev); + sunxi_sram_release(&pdev->dev); + clk_disable_unprepare(db->clk); + iounmap(db->membase); free_netdev(ndev); dev_dbg(&pdev->dev, "released and freed device\n"); diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index cb367cc59e0b..5330bcb8a944 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -714,7 +714,6 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) strlcpy(info->version, DRV_VERSION, sizeof(info->version)); snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME, aup->mac_id); - info->regdump_len = 0; } static void au1000_set_msglevel(struct net_device *dev, u32 value) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 2c063b60db4b..96f485ab612e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -327,9 +327,13 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) pdata->debugfs_xpcs_reg = 0; buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); + if (!buf) + return; + pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL); if (!pdata->xgbe_debugfs) { netdev_err(pdata->netdev, "debugfs_create_dir failed\n"); + kfree(buf); return; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 45512242baea..112f1bc8bcee 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1595,7 +1595,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) packet->rdesc_count, 1); /* Make sure ownership is written to the descriptor */ - dma_wmb(); + wmb(); ring->cur = cur_index + 1; if (!packet->skb->xmit_more || diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 14bad8c44c87..cff8940e1694 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -365,7 +365,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) /* Restart the device on a Fatal Bus Error */ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) - queue_work(pdata->dev_workqueue, &pdata->restart_work); + schedule_work(&pdata->restart_work); /* Clear all interrupt signals */ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); @@ -1537,7 +1537,7 @@ static void xgbe_tx_timeout(struct net_device *netdev) struct xgbe_prv_data *pdata = netdev_priv(netdev); netdev_warn(netdev, "tx timeout, device restarting\n"); - queue_work(pdata->dev_workqueue, &pdata->restart_work); + schedule_work(&pdata->restart_work); } static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, @@ -1811,6 +1811,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) struct netdev_queue *txq; int processed = 0; unsigned int tx_packets = 0, tx_bytes = 0; + unsigned int cur; DBGPR("-->xgbe_tx_poll\n"); @@ -1818,10 +1819,11 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) if (!ring) return 0; + cur = ring->cur; txq = netdev_get_tx_queue(netdev, channel->queue_index); while ((processed < XGBE_TX_DESC_MAX_PROC) && - (ring->dirty != ring->cur)) { + (ring->dirty != cur)) { rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); rdesc = rdata->rdesc; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 204fb3afb182..6040293db9c1 100644 --- 
a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -375,7 +375,6 @@ static void xgbe_get_drvinfo(struct net_device *netdev, XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER), XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID), XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER)); - drvinfo->n_stats = XGBE_STATS_COUNT; } static u32 xgbe_get_msglevel(struct net_device *netdev) diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c index 48694c239d5c..872b7abb0196 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c @@ -233,10 +233,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = 0; - drvinfo->testinfo_len = 0; - drvinfo->regdump_len = atl1c_get_regs_len(netdev); - drvinfo->eedump_len = atl1c_get_eeprom_len(netdev); } static void atl1c_get_wol(struct net_device *netdev, diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c index 1be072f4afc2..8e3dbd4d9f79 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c @@ -316,10 +316,6 @@ static void atl1e_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = 0; - drvinfo->testinfo_len = 0; - drvinfo->regdump_len = atl1e_get_regs_len(netdev); - drvinfo->eedump_len = atl1e_get_eeprom_len(netdev); } static void atl1e_get_wol(struct net_device *netdev, diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index eca1d113fee1..529bca718334 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -3388,7 +3388,6 @@ static void atl1_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->eedump_len = ATL1_EEDUMP_LEN; } static void atl1_get_wol(struct net_device *netdev, diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 46a535318c7a..8f76f4558a88 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -2030,10 +2030,6 @@ static void atl2_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = 0; - drvinfo->testinfo_len = 0; - drvinfo->regdump_len = atl2_get_regs_len(netdev); - drvinfo->eedump_len = atl2_get_eeprom_len(netdev); } static void atl2_get_wol(struct net_device *netdev, diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index e930aa9a3cfb..67a7d520d9f5 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -170,4 +170,23 @@ config SYSTEMPORT Broadcom BCM7xxx Set Top Box family chipset using an internal Ethernet switch. +config BNXT + tristate "Broadcom NetXtreme-C/E support" + depends on PCI + select FW_LOADER + select LIBCRC32C + ---help--- + This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit + Ethernet cards. 
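
The get_drvinfo hunks in this series (greth, au1000, xgbe and the atl* drivers above, the Broadcom drivers below) all drop the n_stats, testinfo_len, regdump_len and eedump_len assignments because the ethtool core already fills those fields from get_sset_count(), get_regs_len() and get_eeprom_len(). A hedged sketch of what such a callback reduces to after this cleanup; the foo_ names and strings are placeholders, not a driver from this series:

static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	/* only identification strings remain the driver's job; the
	 * ethtool core derives the various *_len and n_stats fields
	 * from the other ethtool_ops callbacks
	 */
	strlcpy(info->driver, "foo", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}
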
To compile this driver as a module, choose M here: + the module will be called bnxt_en. This is recommended. + +config BNXT_SRIOV + bool "Broadcom NetXtreme-C/E SR-IOV support" + depends on BNXT && PCI_IOV + default y + ---help--- + This configuration parameter enables Single Root Input Output + Virtualization support in the NetXtreme-C/E products. This + allows for virtual function acceleration in virtual environments. + endif # NET_VENDOR_BROADCOM diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index e2a958a657e0..00584d78b3e0 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_TIGON3) += tg3.o obj-$(CONFIG_BGMAC) += bgmac.o obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o +obj-$(CONFIG_BNXT) += bnxt/ diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index a7f2cc3e485e..8b1929e9f698 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1333,7 +1333,6 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); - drvinfo->n_stats = BCM_ENET_STATS_LEN; } static int bcm_enet_get_sset_count(struct net_device *netdev, @@ -2049,7 +2048,7 @@ static void swphy_poll_timer(unsigned long data) for (i = 0; i < priv->num_ports; i++) { struct bcm63xx_enetsw_port *port; - int val, j, up, advertise, lpa, lpa2, speed, duplex, media; + int val, j, up, advertise, lpa, speed, duplex, media; int external_phy = bcm_enet_port_is_rgmii(i); u8 override; @@ -2092,22 +2091,27 @@ static void swphy_poll_timer(unsigned long data) lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, MII_LPA); - lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, - MII_STAT1000); - /* figure out media and duplex from advertise and LPA values */ media = mii_nway_result(lpa & advertise); duplex = (media & ADVERTISE_FULL) ? 
1 : 0; - if (lpa2 & LPA_1000FULL) - duplex = 1; - - if (lpa2 & (LPA_1000FULL | LPA_1000HALF)) - speed = 1000; - else { - if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) - speed = 100; - else - speed = 10; + + if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) + speed = 100; + else + speed = 10; + + if (val & BMSR_ESTATEN) { + advertise = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_CTRL1000); + + lpa = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_STAT1000); + + if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF) + && lpa & (LPA_1000FULL | LPA_1000HALF)) { + speed = 1000; + duplex = (lpa & LPA_1000FULL); + } } dev_info(&priv->pdev->dev, @@ -2597,7 +2601,6 @@ static void bcm_enetsw_get_drvinfo(struct net_device *netdev, strncpy(drvinfo->version, bcm_enet_driver_version, 32); strncpy(drvinfo->fw_version, "N/A", 32); strncpy(drvinfo->bus_info, "bcm63xx", 32); - drvinfo->n_stats = BCM_ENETSW_STATS_LEN; } static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f1b5364f3521..858106352ce9 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -287,7 +287,6 @@ static void bcm_sysport_get_drvinfo(struct net_device *dev, strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, "0.1", sizeof(info->version)); strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); - info->n_stats = BCM_SYSPORT_STATS_LEN; } static u32 bcm_sysport_get_msglvl(struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index aeb7ce64452e..d84efcd34fac 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1090,10 +1090,6 @@ static void bnx2x_get_drvinfo(struct net_device *dev, bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); - info->n_stats = BNX2X_NUM_STATS; - info->testinfo_len = BNX2X_NUM_TESTS(bp); - info->eedump_len = bp->common.flash_size; - info->regdump_len = bnx2x_get_regs_len(dev); } static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -3351,6 +3347,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) udp_rss_requested = 0; else return -EINVAL; + + if (CHIP_IS_E1x(bp) && udp_rss_requested) { + DP(BNX2X_MSG_ETHTOOL, + "57710, 57711 boards don't support RSS according to UDP 4-tuple\n"); + return -EINVAL; + } + if ((info->flow_type == UDP_V4_FLOW) && (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile new file mode 100644 index 000000000000..97e78e217928 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_BNXT) += bnxt_en.o + +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c new file mode 100644 index 000000000000..6c2e0c622831 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -0,0 +1,5728 @@ +/* Broadcom NetXtreme-C/E network driver. 
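
The bcm63xx_enet poll rework just above stops reading MII_STAT1000 unconditionally: the gigabit registers are consulted only when BMSR_ESTATEN says they exist, and 1000 Mb/s is reported only when both link partners advertise it. A standalone sketch of that resolution order; the MII bit values are copied from linux/mii.h and the helper simplifies what mii_nway_result() does for the 10/100 case:

#include <stdbool.h>
#include <stdio.h>

/* bit values as in linux/mii.h */
#define BMSR_ESTATEN		0x0100	/* extended status registers exist */
#define ADVERTISE_10FULL	0x0040
#define ADVERTISE_100HALF	0x0080
#define ADVERTISE_100FULL	0x0100
#define ADVERTISE_1000HALF	0x0100	/* MII_CTRL1000 */
#define ADVERTISE_1000FULL	0x0200
#define LPA_1000HALF		0x0400	/* MII_STAT1000 */
#define LPA_1000FULL		0x0800

static void resolve_link(unsigned int bmsr, unsigned int adv, unsigned int lpa,
			 unsigned int ctrl1000, unsigned int stat1000,
			 int *speed, bool *full_duplex)
{
	unsigned int media = adv & lpa;	/* common 10/100 abilities */

	*full_duplex = media & (ADVERTISE_100FULL | ADVERTISE_10FULL);
	*speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) ? 100 : 10;

	/* only trust the gigabit registers when the PHY implements them */
	if ((bmsr & BMSR_ESTATEN) &&
	    (ctrl1000 & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
	    (stat1000 & (LPA_1000FULL | LPA_1000HALF))) {
		*speed = 1000;
		*full_duplex = stat1000 & LPA_1000FULL;
	}
}

int main(void)
{
	int speed;
	bool fd;

	resolve_link(BMSR_ESTATEN, 0x01e1, 0x01e1,
		     ADVERTISE_1000FULL, LPA_1000FULL, &speed, &fd);
	printf("%d Mb/s, %s duplex\n", speed, fd ? "full" : "half");
	return 0;
}
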
+ * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <linux/module.h> + +#include <linux/stringify.h> +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <asm/byteorder.h> +#include <asm/page.h> +#include <linux/time.h> +#include <linux/mii.h> +#include <linux/if.h> +#include <linux/if_vlan.h> +#include <net/ip.h> +#include <net/tcp.h> +#include <net/udp.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) +#include <net/vxlan.h> +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL +#include <net/busy_poll.h> +#endif +#include <linux/workqueue.h> +#include <linux/prefetch.h> +#include <linux/cache.h> +#include <linux/log2.h> +#include <linux/aer.h> +#include <linux/bitmap.h> +#include <linux/cpu_rmap.h> + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_sriov.h" +#include "bnxt_ethtool.h" + +#define BNXT_TX_TIMEOUT (5 * HZ) + +static const char version[] = + "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n"; + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) +#define BNXT_RX_DMA_OFFSET NET_SKB_PAD +#define BNXT_RX_COPY_THRESH 256 + +#define BNXT_TX_PUSH_THRESH 92 + +enum board_idx { + BCM57302, + BCM57304, + BCM57404, + BCM57406, + BCM57304_VF, + BCM57404_VF, +}; + +/* indexed by enum above */ +static const struct { + char *name; +} board_info[] = { + { "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, + { "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" }, + { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" }, + { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" }, +}; + +static const struct pci_device_id bnxt_pci_tbl[] = { + { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, + { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, + { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, + { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, +#ifdef CONFIG_BNXT_SRIOV + { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF }, + { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF }, +#endif + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); + +static const u16 bnxt_vf_req_snif[] = { + HWRM_FUNC_CFG, + HWRM_PORT_PHY_QCFG, + HWRM_CFA_L2_FILTER_ALLOC, +}; + +static bool bnxt_vf_pciid(enum board_idx idx) +{ + return (idx == BCM57304_VF || idx == BCM57404_VF); +} + +#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) +#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) +#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) + +#define BNXT_CP_DB_REARM(db, raw_cons) \ + writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db) + +#define BNXT_CP_DB(db, raw_cons) \ + 
writel(DB_CP_FLAGS | RING_CMP(raw_cons), db) + +#define BNXT_CP_DB_IRQ_DIS(db) \ + writel(DB_CP_IRQ_DIS_FLAGS, db) + +static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) +{ + /* Tell compiler to fetch tx indices from memory. */ + barrier(); + + return bp->tx_ring_size - + ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); +} + +static const u16 bnxt_lhint_arr[] = { + TX_BD_FLAGS_LHINT_512_AND_SMALLER, + TX_BD_FLAGS_LHINT_512_TO_1023, + TX_BD_FLAGS_LHINT_1024_TO_2047, + TX_BD_FLAGS_LHINT_1024_TO_2047, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, +}; + +static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + struct tx_bd *txbd; + struct tx_bd_ext *txbd1; + struct netdev_queue *txq; + int i; + dma_addr_t mapping; + unsigned int length, pad = 0; + u32 len, free_size, vlan_tag_flags, cfa_action, flags; + u16 prod, last_frag; + struct pci_dev *pdev = bp->pdev; + struct bnxt_napi *bnapi; + struct bnxt_tx_ring_info *txr; + struct bnxt_sw_tx_bd *tx_buf; + + i = skb_get_queue_mapping(skb); + if (unlikely(i >= bp->tx_nr_rings)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + bnapi = bp->bnapi[i]; + txr = &bnapi->tx_ring; + txq = netdev_get_tx_queue(dev, i); + prod = txr->tx_prod; + + free_size = bnxt_tx_avail(bp, txr); + if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { + netif_tx_stop_queue(txq); + return NETDEV_TX_BUSY; + } + + length = skb->len; + len = skb_headlen(skb); + last_frag = skb_shinfo(skb)->nr_frags; + + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + + txbd->tx_bd_opaque = prod; + + tx_buf = &txr->tx_buf_ring[prod]; + tx_buf->skb = skb; + tx_buf->nr_frags = last_frag; + + vlan_tag_flags = 0; + cfa_action = 0; + if (skb_vlan_tag_present(skb)) { + vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | + skb_vlan_tag_get(skb); + /* Currently supports 8021Q, 8021AD vlan offloads + * QINQ1, QINQ2, QINQ3 vlan headers are deprecated + */ + if (skb->vlan_proto == htons(ETH_P_8021Q)) + vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; + } + + if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { + struct tx_push_bd *push = txr->tx_push; + struct tx_bd *tx_push = &push->txbd1; + struct tx_bd_ext *tx_push1 = &push->txbd2; + void *pdata = tx_push1 + 1; + int j; + + /* Set COAL_NOW to be ready quickly for the next push */ + tx_push->tx_bd_len_flags_type = + cpu_to_le32((length << TX_BD_LEN_SHIFT) | + TX_BD_TYPE_LONG_TX_BD | + TX_BD_FLAGS_LHINT_512_AND_SMALLER | + TX_BD_FLAGS_COAL_NOW | + TX_BD_FLAGS_PACKET_END | + (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_push1->tx_bd_hsize_lflags = + cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); + else + tx_push1->tx_bd_hsize_lflags = 0; + + tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); + tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); + + skb_copy_from_linear_data(skb, pdata, len); + pdata += len; + for (j = 0; j < last_frag; j++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; + 
void *fptr; + + fptr = skb_frag_address_safe(frag); + if (!fptr) + goto normal_tx; + + memcpy(pdata, fptr, skb_frag_size(frag)); + pdata += skb_frag_size(frag); + } + + memcpy(txbd, tx_push, sizeof(*txbd)); + prod = NEXT_TX(prod); + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + memcpy(txbd, tx_push1, sizeof(*txbd)); + prod = NEXT_TX(prod); + push->doorbell = + cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); + txr->tx_prod = prod; + + netdev_tx_sent_queue(txq, skb->len); + + __iowrite64_copy(txr->tx_doorbell, push, + (length + sizeof(*push) + 8) / 8); + + tx_buf->is_push = 1; + + goto tx_done; + } + +normal_tx: + if (length < BNXT_MIN_PKT_SIZE) { + pad = BNXT_MIN_PKT_SIZE - length; + if (skb_pad(skb, pad)) { + /* SKB already freed. */ + tx_buf->skb = NULL; + return NETDEV_TX_OK; + } + length = BNXT_MIN_PKT_SIZE; + } + + mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { + dev_kfree_skb_any(skb); + tx_buf->skb = NULL; + return NETDEV_TX_OK; + } + + dma_unmap_addr_set(tx_buf, mapping, mapping); + flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | + ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); + + txbd->tx_bd_haddr = cpu_to_le64(mapping); + + prod = NEXT_TX(prod); + txbd1 = (struct tx_bd_ext *) + &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + + txbd1->tx_bd_hsize_lflags = 0; + if (skb_is_gso(skb)) { + u32 hdr_len; + + if (skb->encapsulation) + hdr_len = skb_inner_network_offset(skb) + + skb_inner_network_header_len(skb) + + inner_tcp_hdrlen(skb); + else + hdr_len = skb_transport_offset(skb) + + tcp_hdrlen(skb); + + txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO | + TX_BD_FLAGS_T_IPID | + (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); + length = skb_shinfo(skb)->gso_size; + txbd1->tx_bd_mss = cpu_to_le32(length); + length += hdr_len; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + txbd1->tx_bd_hsize_lflags = + cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); + txbd1->tx_bd_mss = 0; + } + + length >>= 9; + flags |= bnxt_lhint_arr[length]; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + + txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); + txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action); + for (i = 0; i < last_frag; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + prod = NEXT_TX(prod); + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + + len = skb_frag_size(frag); + mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&pdev->dev, mapping))) + goto tx_dma_error; + + tx_buf = &txr->tx_buf_ring[prod]; + dma_unmap_addr_set(tx_buf, mapping, mapping); + + txbd->tx_bd_haddr = cpu_to_le64(mapping); + + flags = len << TX_BD_LEN_SHIFT; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + } + + flags &= ~TX_BD_LEN; + txbd->tx_bd_len_flags_type = + cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | + TX_BD_FLAGS_PACKET_END); + + netdev_tx_sent_queue(txq, skb->len); + + /* Sync BD data before updating doorbell */ + wmb(); + + prod = NEXT_TX(prod); + txr->tx_prod = prod; + + writel(DB_KEY_TX | prod, txr->tx_doorbell); + writel(DB_KEY_TX | prod, txr->tx_doorbell); + +tx_done: + + mmiowb(); + + if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * tx index in bnxt_tx_avail() below, because in + * bnxt_tx_int(), we update tx index before checking for + * netif_tx_queue_stopped(). 
+ */ + smp_mb(); + if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) + netif_tx_wake_queue(txq); + } + return NETDEV_TX_OK; + +tx_dma_error: + last_frag = i; + + /* start back at beginning and unmap skb */ + prod = txr->tx_prod; + tx_buf = &txr->tx_buf_ring[prod]; + tx_buf->skb = NULL; + dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), PCI_DMA_TODEVICE); + prod = NEXT_TX(prod); + + /* unmap remaining mapped pages */ + for (i = 0; i < last_frag; i++) { + prod = NEXT_TX(prod); + tx_buf = &txr->tx_buf_ring[prod]; + dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_TODEVICE); + } + + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) +{ + struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; + int index = bnapi->index; + struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index); + u16 cons = txr->tx_cons; + struct pci_dev *pdev = bp->pdev; + int i; + unsigned int tx_bytes = 0; + + for (i = 0; i < nr_pkts; i++) { + struct bnxt_sw_tx_bd *tx_buf; + struct sk_buff *skb; + int j, last; + + tx_buf = &txr->tx_buf_ring[cons]; + cons = NEXT_TX(cons); + skb = tx_buf->skb; + tx_buf->skb = NULL; + + if (tx_buf->is_push) { + tx_buf->is_push = 0; + goto next_tx_int; + } + + dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), PCI_DMA_TODEVICE); + last = tx_buf->nr_frags; + + for (j = 0; j < last; j++) { + cons = NEXT_TX(cons); + tx_buf = &txr->tx_buf_ring[cons]; + dma_unmap_page( + &pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[j]), + PCI_DMA_TODEVICE); + } + +next_tx_int: + cons = NEXT_TX(cons); + + tx_bytes += skb->len; + dev_kfree_skb_any(skb); + } + + netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); + txr->tx_cons = cons; + + /* Need to make the tx_cons update visible to bnxt_start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that bnxt_start_xmit() + * will miss it and cause the queue to be stopped forever. 
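
bnxt_tx_avail() earlier in this file computes free descriptors as tx_ring_size - ((tx_prod - tx_cons) & tx_ring_mask), and the two comments above explain why both the xmit and completion paths need smp_mb() between stopping or replenishing the queue and re-reading those indices. A small standalone model of the ring arithmetic; the barrier ordering can only be noted in comments, since single-threaded code cannot demonstrate it:

#include <stdio.h>

#define RING_SIZE	512u
#define RING_MASK	(RING_SIZE - 1)

/* free descriptors; correct across index wrap-around because the ring
 * size is a power of two
 */
static unsigned int tx_avail(unsigned int prod, unsigned int cons)
{
	return RING_SIZE - ((prod - cons) & RING_MASK);
}

int main(void)
{
	/* producer has wrapped, consumer has not: 7 in flight, 505 free */
	printf("%u\n", tx_avail(5, 510));

	/*
	 * Ordering sketched by the driver comments above:
	 *   xmit:        stop queue; smp_mb(); if (avail > wake_thresh) wake
	 *   completion:  update cons; smp_mb(); if (stopped && avail > wake_thresh) wake
	 */
	return 0;
}
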
+ */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq)) && + (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && + txr->dev_state != BNXT_DEV_STATE_CLOSING) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } +} + +static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, + gfp_t gfp) +{ + u8 *data; + struct pci_dev *pdev = bp->pdev; + + data = kmalloc(bp->rx_buf_size, gfp); + if (!data) + return NULL; + + *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET, + bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + + if (dma_mapping_error(&pdev->dev, *mapping)) { + kfree(data); + data = NULL; + } + return data; +} + +static inline int bnxt_alloc_rx_data(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp) +{ + struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; + u8 *data; + dma_addr_t mapping; + + data = __bnxt_alloc_rx_data(bp, &mapping, gfp); + if (!data) + return -ENOMEM; + + rx_buf->data = data; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rxbd->rx_bd_haddr = cpu_to_le64(mapping); + + return 0; +} + +static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, + u8 *data) +{ + u16 prod = rxr->rx_prod; + struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + struct rx_bd *cons_bd, *prod_bd; + + prod_rx_buf = &rxr->rx_buf_ring[prod]; + cons_rx_buf = &rxr->rx_buf_ring[cons]; + + prod_rx_buf->data = data; + + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); + + prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; + + prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; +} + +static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) +{ + u16 next, max = rxr->rx_agg_bmap_size; + + next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); + if (next >= max) + next = find_first_zero_bit(rxr->rx_agg_bmap, max); + return next; +} + +static inline int bnxt_alloc_rx_page(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp) +{ + struct rx_bd *rxbd = + &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + struct bnxt_sw_rx_agg_bd *rx_agg_buf; + struct pci_dev *pdev = bp->pdev; + struct page *page; + dma_addr_t mapping; + u16 sw_prod = rxr->rx_sw_agg_prod; + + page = alloc_page(gfp); + if (!page) + return -ENOMEM; + + mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&pdev->dev, mapping)) { + __free_page(page); + return -EIO; + } + + if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) + sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); + + __set_bit(sw_prod, rxr->rx_agg_bmap); + rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; + rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); + + rx_agg_buf->page = page; + rx_agg_buf->mapping = mapping; + rxbd->rx_bd_haddr = cpu_to_le64(mapping); + rxbd->rx_bd_opaque = sw_prod; + return 0; +} + +static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, + u32 agg_bufs) +{ + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + u16 prod = rxr->rx_agg_prod; + u16 sw_prod = rxr->rx_sw_agg_prod; + u32 i; + + for (i = 0; i < agg_bufs; i++) { + u16 cons; + struct rx_agg_cmp *agg; + struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; + struct rx_bd 
*prod_bd; + struct page *page; + + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + cons = agg->rx_agg_cmp_opaque; + __clear_bit(cons, rxr->rx_agg_bmap); + + if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) + sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); + + __set_bit(sw_prod, rxr->rx_agg_bmap); + prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; + cons_rx_buf = &rxr->rx_agg_ring[cons]; + + /* It is possible for sw_prod to be equal to cons, so + * set cons_rx_buf->page to NULL first. + */ + page = cons_rx_buf->page; + cons_rx_buf->page = NULL; + prod_rx_buf->page = page; + + prod_rx_buf->mapping = cons_rx_buf->mapping; + + prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + + prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); + prod_bd->rx_bd_opaque = sw_prod; + + prod = NEXT_RX_AGG(prod); + sw_prod = NEXT_RX_AGG(sw_prod); + cp_cons = NEXT_CMP(cp_cons); + } + rxr->rx_agg_prod = prod; + rxr->rx_sw_agg_prod = sw_prod; +} + +static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, u16 cons, + u16 prod, u8 *data, dma_addr_t dma_addr, + unsigned int len) +{ + int err; + struct sk_buff *skb; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, data); + return NULL; + } + + skb = build_skb(data, 0); + dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + if (!skb) { + kfree(data); + return NULL; + } + + skb_reserve(skb, BNXT_RX_OFFSET); + skb_put(skb, len); + return skb; +} + +static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, + struct sk_buff *skb, u16 cp_cons, + u32 agg_bufs) +{ + struct pci_dev *pdev = bp->pdev; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + u16 prod = rxr->rx_agg_prod; + u32 i; + + for (i = 0; i < agg_bufs; i++) { + u16 cons, frag_len; + struct rx_agg_cmp *agg; + struct bnxt_sw_rx_agg_bd *cons_rx_buf; + struct page *page; + dma_addr_t mapping; + + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + cons = agg->rx_agg_cmp_opaque; + frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & + RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; + + cons_rx_buf = &rxr->rx_agg_ring[cons]; + skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len); + __clear_bit(cons, rxr->rx_agg_bmap); + + /* It is possible for bnxt_alloc_rx_page() to allocate + * a sw_prod index that equals the cons index, so we + * need to clear the cons entry now. + */ + mapping = dma_unmap_addr(cons_rx_buf, mapping); + page = cons_rx_buf->page; + cons_rx_buf->page = NULL; + + if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { + struct skb_shared_info *shinfo; + unsigned int nr_frags; + + shinfo = skb_shinfo(skb); + nr_frags = --shinfo->nr_frags; + __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); + + dev_kfree_skb(skb); + + cons_rx_buf->page = page; + + /* Update prod since possibly some pages have been + * allocated already. 
+ */ + rxr->rx_agg_prod = prod; + bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i); + return NULL; + } + + dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + + skb->data_len += frag_len; + skb->len += frag_len; + skb->truesize += PAGE_SIZE; + + prod = NEXT_RX_AGG(prod); + cp_cons = NEXT_CMP(cp_cons); + } + rxr->rx_agg_prod = prod; + return skb; +} + +static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u8 agg_bufs, u32 *raw_cons) +{ + u16 last; + struct rx_agg_cmp *agg; + + *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); + last = RING_CMP(*raw_cons); + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; + return RX_AGG_CMP_VALID(agg, *raw_cons); +} + +static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, + unsigned int len, + dma_addr_t mapping) +{ + struct bnxt *bp = bnapi->bp; + struct pci_dev *pdev = bp->pdev; + struct sk_buff *skb; + + skb = napi_alloc_skb(&bnapi->napi, len); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&pdev->dev, mapping, + bp->rx_copy_thresh, PCI_DMA_FROMDEVICE); + + memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET); + + dma_sync_single_for_device(&pdev->dev, mapping, + bp->rx_copy_thresh, + PCI_DMA_FROMDEVICE); + + skb_put(skb, len); + return skb; +} + +static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + struct rx_tpa_start_cmp *tpa_start, + struct rx_tpa_start_cmp_ext *tpa_start1) +{ + u8 agg_id = TPA_START_AGG_ID(tpa_start); + u16 cons, prod; + struct bnxt_tpa_info *tpa_info; + struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + struct rx_bd *prod_bd; + dma_addr_t mapping; + + cons = tpa_start->rx_tpa_start_cmp_opaque; + prod = rxr->rx_prod; + cons_rx_buf = &rxr->rx_buf_ring[cons]; + prod_rx_buf = &rxr->rx_buf_ring[prod]; + tpa_info = &rxr->rx_tpa[agg_id]; + + prod_rx_buf->data = tpa_info->data; + + mapping = tpa_info->mapping; + dma_unmap_addr_set(prod_rx_buf, mapping, mapping); + + prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + + prod_bd->rx_bd_haddr = cpu_to_le64(mapping); + + tpa_info->data = cons_rx_buf->data; + cons_rx_buf->data = NULL; + tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping); + + tpa_info->len = + le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> + RX_TPA_START_CMP_LEN_SHIFT; + if (likely(TPA_START_HASH_VALID(tpa_start))) { + u32 hash_type = TPA_START_HASH_TYPE(tpa_start); + + tpa_info->hash_type = PKT_HASH_TYPE_L4; + tpa_info->gso_type = SKB_GSO_TCPV4; + /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ + if (hash_type == 3) + tpa_info->gso_type = SKB_GSO_TCPV6; + tpa_info->rss_hash = + le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); + } else { + tpa_info->hash_type = PKT_HASH_TYPE_NONE; + tpa_info->gso_type = 0; + if (netif_msg_rx_err(bp)) + netdev_warn(bp->dev, "TPA packet without valid hash\n"); + } + tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); + tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); + + rxr->rx_prod = NEXT_RX(prod); + cons = NEXT_RX(cons); + cons_rx_buf = &rxr->rx_buf_ring[cons]; + + bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); + rxr->rx_prod = NEXT_RX(rxr->rx_prod); + cons_rx_buf->data = NULL; +} + +static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, + u16 cp_cons, u32 agg_bufs) +{ + if (agg_bufs) + bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); +} + +#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) +#define BNXT_IPV6_HDR_SIZE 
(sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) + +static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info, + struct rx_tpa_end_cmp *tpa_end, + struct rx_tpa_end_cmp_ext *tpa_end1, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + struct tcphdr *th; + int payload_off, tcp_opt_len = 0; + int len, nw_off; + + NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end); + skb_shinfo(skb)->gso_size = + le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); + skb_shinfo(skb)->gso_type = tpa_info->gso_type; + payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & + RX_TPA_END_CMP_PAYLOAD_OFFSET) >> + RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; + if (TPA_END_GRO_TS(tpa_end)) + tcp_opt_len = 12; + + if (tpa_info->gso_type == SKB_GSO_TCPV4) { + struct iphdr *iph; + + nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - + ETH_HLEN; + skb_set_network_header(skb, nw_off); + iph = ip_hdr(skb); + skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); + len = skb->len - skb_transport_offset(skb); + th = tcp_hdr(skb); + th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); + } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { + struct ipv6hdr *iph; + + nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - + ETH_HLEN; + skb_set_network_header(skb, nw_off); + iph = ipv6_hdr(skb); + skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); + len = skb->len - skb_transport_offset(skb); + th = tcp_hdr(skb); + th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); + } else { + dev_kfree_skb_any(skb); + return NULL; + } + tcp_gro_complete(skb); + + if (nw_off) { /* tunnel */ + struct udphdr *uh = NULL; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)skb->data; + + if (iph->protocol == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + + if (iph->nexthdr == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } + if (uh) { + if (uh->check) + skb_shinfo(skb)->gso_type |= + SKB_GSO_UDP_TUNNEL_CSUM; + else + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; + } + } +#endif + return skb; +} + +static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, + struct bnxt_napi *bnapi, + u32 *raw_cons, + struct rx_tpa_end_cmp *tpa_end, + struct rx_tpa_end_cmp_ext *tpa_end1, + bool *agg_event) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + u8 agg_id = TPA_END_AGG_ID(tpa_end); + u8 *data, agg_bufs; + u16 cp_cons = RING_CMP(*raw_cons); + unsigned int len; + struct bnxt_tpa_info *tpa_info; + dma_addr_t mapping; + struct sk_buff *skb; + + tpa_info = &rxr->rx_tpa[agg_id]; + data = tpa_info->data; + prefetch(data); + len = tpa_info->len; + mapping = tpa_info->mapping; + + agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & + RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT; + + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) + return ERR_PTR(-EBUSY); + + *agg_event = true; + cp_cons = NEXT_CMP(cp_cons); + } + + if (unlikely(agg_bufs > MAX_SKB_FRAGS)) { + bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", + agg_bufs, (int)MAX_SKB_FRAGS); + return NULL; + } + + if (len <= bp->rx_copy_thresh) { + skb = bnxt_copy_skb(bnapi, data, len, mapping); + if (!skb) { + bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + return NULL; + } + } else { + u8 *new_data; + dma_addr_t new_mapping; + + new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); + if 
(!new_data) { + bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + return NULL; + } + + tpa_info->data = new_data; + tpa_info->mapping = new_mapping; + + skb = build_skb(data, 0); + dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + + if (!skb) { + kfree(data); + bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + return NULL; + } + skb_reserve(skb, BNXT_RX_OFFSET); + skb_put(skb, len); + } + + if (agg_bufs) { + skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); + if (!skb) { + /* Page reuse already handled by bnxt_rx_pages(). */ + return NULL; + } + } + skb->protocol = eth_type_trans(skb, bp->dev); + + if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) + skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); + + if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { + netdev_features_t features = skb->dev->features; + u16 vlan_proto = tpa_info->metadata >> + RX_CMP_FLAGS2_METADATA_TPID_SFT; + + if (((features & NETIF_F_HW_VLAN_CTAG_RX) && + vlan_proto == ETH_P_8021Q) || + ((features & NETIF_F_HW_VLAN_STAG_RX) && + vlan_proto == ETH_P_8021AD)) { + __vlan_hwaccel_put_tag(skb, htons(vlan_proto), + tpa_info->metadata & + RX_CMP_FLAGS2_METADATA_VID_MASK); + } + } + + skb_checksum_none_assert(skb); + if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = + (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; + } + + if (TPA_END_GRO(tpa_end)) + skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb); + + return skb; +} + +/* returns the following: + * 1 - 1 packet successfully received + * 0 - successful TPA_START, packet not completed yet + * -EBUSY - completion ring does not have all the agg buffers yet + * -ENOMEM - packet aborted due to out of memory + * -EIO - packet aborted due to hw error indicated in BD + */ +static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, + bool *agg_event) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct net_device *dev = bp->dev; + struct rx_cmp *rxcmp; + struct rx_cmp_ext *rxcmp1; + u32 tmp_raw_cons = *raw_cons; + u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); + struct bnxt_sw_rx_bd *rx_buf; + unsigned int len; + u8 *data, agg_bufs, cmp_type; + dma_addr_t dma_addr; + struct sk_buff *skb; + int rc = 0; + + rxcmp = (struct rx_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); + cp_cons = RING_CMP(tmp_raw_cons); + rxcmp1 = (struct rx_cmp_ext *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) + return -EBUSY; + + cmp_type = RX_CMP_TYPE(rxcmp); + + prod = rxr->rx_prod; + + if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { + bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, + (struct rx_tpa_start_cmp_ext *)rxcmp1); + + goto next_rx_no_prod; + + } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { + skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, + (struct rx_tpa_end_cmp *)rxcmp, + (struct rx_tpa_end_cmp_ext *)rxcmp1, + agg_event); + + if (unlikely(IS_ERR(skb))) + return -EBUSY; + + rc = -ENOMEM; + if (likely(skb)) { + skb_record_rx_queue(skb, bnapi->index); + skb_mark_napi_id(skb, &bnapi->napi); + if (bnxt_busy_polling(bnapi)) + netif_receive_skb(skb); + else + napi_gro_receive(&bnapi->napi, skb); + rc = 1; + } + goto next_rx_no_prod; + } + + cons = rxcmp->rx_cmp_opaque; + rx_buf = &rxr->rx_buf_ring[cons]; + data = rx_buf->data; + prefetch(data); + + 
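+	/* The aggregation buffer count for this packet is encoded in the rx_cmp_misc_v1 word of the completion. */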
agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >> + RX_CMP_AGG_BUFS_SHIFT; + + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) + return -EBUSY; + + cp_cons = NEXT_CMP(cp_cons); + *agg_event = true; + } + + rx_buf->data = NULL; + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { + bnxt_reuse_rx_data(rxr, cons, data); + if (agg_bufs) + bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); + + rc = -EIO; + goto next_rx; + } + + len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; + dma_addr = dma_unmap_addr(rx_buf, mapping); + + if (len <= bp->rx_copy_thresh) { + skb = bnxt_copy_skb(bnapi, data, len, dma_addr); + bnxt_reuse_rx_data(rxr, cons, data); + if (!skb) { + rc = -ENOMEM; + goto next_rx; + } + } else { + skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len); + if (!skb) { + rc = -ENOMEM; + goto next_rx; + } + } + + if (agg_bufs) { + skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); + if (!skb) { + rc = -ENOMEM; + goto next_rx; + } + } + + if (RX_CMP_HASH_VALID(rxcmp)) { + u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); + enum pkt_hash_types type = PKT_HASH_TYPE_L4; + + /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ + if (hash_type != 1 && hash_type != 3) + type = PKT_HASH_TYPE_L3; + skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); + } + + skb->protocol = eth_type_trans(skb, dev); + + if (rxcmp1->rx_cmp_flags2 & + cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) { + netdev_features_t features = skb->dev->features; + u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); + u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; + + if (((features & NETIF_F_HW_VLAN_CTAG_RX) && + vlan_proto == ETH_P_8021Q) || + ((features & NETIF_F_HW_VLAN_STAG_RX) && + vlan_proto == ETH_P_8021AD)) + __vlan_hwaccel_put_tag(skb, htons(vlan_proto), + meta_data & + RX_CMP_FLAGS2_METADATA_VID_MASK); + } + + skb_checksum_none_assert(skb); + if (RX_CMP_L4_CS_OK(rxcmp1)) { + if (dev->features & NETIF_F_RXCSUM) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = RX_CMP_ENCAP(rxcmp1); + } + } else { + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) + cpr->rx_l4_csum_errors++; + } + + skb_record_rx_queue(skb, bnapi->index); + skb_mark_napi_id(skb, &bnapi->napi); + if (bnxt_busy_polling(bnapi)) + netif_receive_skb(skb); + else + napi_gro_receive(&bnapi->napi, skb); + rc = 1; + +next_rx: + rxr->rx_prod = NEXT_RX(prod); + +next_rx_no_prod: + *raw_cons = tmp_raw_cons; + + return rc; +} + +static int bnxt_async_event_process(struct bnxt *bp, + struct hwrm_async_event_cmpl *cmpl) +{ + u16 event_id = le16_to_cpu(cmpl->event_id); + + /* TODO CHIMP_FW: Define event id's for link change, error etc */ + switch (event_id) { + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: + set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + break; + default: + netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", + event_id); + break; + } + return 0; +} + +static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) +{ + u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; + struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; + struct hwrm_fwd_req_cmpl *fwd_req_cmpl = + (struct hwrm_fwd_req_cmpl *)txcmp; + + switch (cmpl_type) { + case CMPL_BASE_TYPE_HWRM_DONE: + seq_id = le16_to_cpu(h_cmpl->sequence_id); + if (seq_id == bp->hwrm_intr_seq_id) + bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; + else + netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); + break; 
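+	/* The forwarded-request and async-event completions below are deferred to the sp_task worker. */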
+ + case CMPL_BASE_TYPE_HWRM_FWD_REQ: + vf_id = le16_to_cpu(fwd_req_cmpl->source_id); + + if ((vf_id < bp->pf.first_vf_id) || + (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { + netdev_err(bp->dev, "Msg contains invalid VF id %x\n", + vf_id); + return -EINVAL; + } + + set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); + set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + break; + + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + bnxt_async_event_process(bp, + (struct hwrm_async_event_cmpl *)txcmp); + + default: + break; + } + + return 0; +} + +static irqreturn_t bnxt_msix(int irq, void *dev_instance) +{ + struct bnxt_napi *bnapi = dev_instance; + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 cons = RING_CMP(cpr->cp_raw_cons); + + prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); + napi_schedule(&bnapi->napi); + return IRQ_HANDLED; +} + +static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) +{ + u32 raw_cons = cpr->cp_raw_cons; + u16 cons = RING_CMP(raw_cons); + struct tx_cmp *txcmp; + + txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + return TX_CMP_VALID(txcmp, raw_cons); +} + +#define CAG_LEGACY_INT_STATUS 0x2014 + +static irqreturn_t bnxt_inta(int irq, void *dev_instance) +{ + struct bnxt_napi *bnapi = dev_instance; + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 cons = RING_CMP(cpr->cp_raw_cons); + u32 int_status; + + prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); + + if (!bnxt_has_work(bp, cpr)) { + int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS); + /* return if erroneous interrupt */ + if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) + return IRQ_NONE; + } + + /* disable ring IRQ */ + BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell); + + /* Return here if interrupt is shared and is disabled. */ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) + return IRQ_HANDLED; + + napi_schedule(&bnapi->napi); + return IRQ_HANDLED; +} + +static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 raw_cons = cpr->cp_raw_cons; + u32 cons; + int tx_pkts = 0; + int rx_pkts = 0; + bool rx_event = false; + bool agg_event = false; + struct tx_cmp *txcmp; + + while (1) { + int rc; + + cons = RING_CMP(raw_cons); + txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + if (!TX_CMP_VALID(txcmp, raw_cons)) + break; + + if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { + tx_pkts++; + /* return full budget so NAPI will complete. */ + if (unlikely(tx_pkts > bp->tx_wake_thresh)) + rx_pkts = budget; + } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event); + if (likely(rc >= 0)) + rx_pkts += rc; + else if (rc == -EBUSY) /* partial completion */ + break; + rx_event = true; + } else if (unlikely((TX_CMP_TYPE(txcmp) == + CMPL_BASE_TYPE_HWRM_DONE) || + (TX_CMP_TYPE(txcmp) == + CMPL_BASE_TYPE_HWRM_FWD_REQ) || + (TX_CMP_TYPE(txcmp) == + CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { + bnxt_hwrm_handler(bp, txcmp); + } + raw_cons = NEXT_RAW_CMP(raw_cons); + + if (rx_pkts == budget) + break; + } + + cpr->cp_raw_cons = raw_cons; + /* ACK completion ring before freeing tx ring and producing new + * buffers in rx/agg rings to prevent overflowing the completion + * ring. 
+ */ + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + + if (tx_pkts) + bnxt_tx_int(bp, bnapi, tx_pkts); + + if (rx_event) { + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + if (agg_event) { + writel(DB_KEY_RX | rxr->rx_agg_prod, + rxr->rx_agg_doorbell); + writel(DB_KEY_RX | rxr->rx_agg_prod, + rxr->rx_agg_doorbell); + } + } + return rx_pkts; +} + +static int bnxt_poll(struct napi_struct *napi, int budget) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int work_done = 0; + + if (!bnxt_lock_napi(bnapi)) + return budget; + + while (1) { + work_done += bnxt_poll_work(bp, bnapi, budget - work_done); + + if (work_done >= budget) + break; + + if (!bnxt_has_work(bp, cpr)) { + napi_complete(napi); + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + break; + } + } + mmiowb(); + bnxt_unlock_napi(bnapi); + return work_done; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +static int bnxt_busy_poll(struct napi_struct *napi) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int rx_work, budget = 4; + + if (atomic_read(&bp->intr_sem) != 0) + return LL_FLUSH_FAILED; + + if (!bnxt_lock_poll(bnapi)) + return LL_FLUSH_BUSY; + + rx_work = bnxt_poll_work(bp, bnapi, budget); + + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + + bnxt_unlock_poll(bnapi); + return rx_work; +} +#endif + +static void bnxt_free_tx_skbs(struct bnxt *bp) +{ + int i, max_idx; + struct pci_dev *pdev = bp->pdev; + + if (!bp->bnapi) + return; + + max_idx = bp->tx_nr_pages * TX_DESC_CNT; + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr; + int j; + + if (!bnapi) + continue; + + txr = &bnapi->tx_ring; + for (j = 0; j < max_idx;) { + struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; + struct sk_buff *skb = tx_buf->skb; + int k, last; + + if (!skb) { + j++; + continue; + } + + tx_buf->skb = NULL; + + if (tx_buf->is_push) { + dev_kfree_skb(skb); + j += 2; + continue; + } + + dma_unmap_single(&pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + last = tx_buf->nr_frags; + j += 2; + for (k = 0; k < last; k++, j = NEXT_TX(j)) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; + + tx_buf = &txr->tx_buf_ring[j]; + dma_unmap_page( + &pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_frag_size(frag), PCI_DMA_TODEVICE); + } + dev_kfree_skb(skb); + } + netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); + } +} + +static void bnxt_free_rx_skbs(struct bnxt *bp) +{ + int i, max_idx, max_agg_idx; + struct pci_dev *pdev = bp->pdev; + + if (!bp->bnapi) + return; + + max_idx = bp->rx_nr_pages * RX_DESC_CNT; + max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr; + int j; + + if (!bnapi) + continue; + + rxr = &bnapi->rx_ring; + + if (rxr->rx_tpa) { + for (j = 0; j < MAX_TPA; j++) { + struct bnxt_tpa_info *tpa_info = + &rxr->rx_tpa[j]; + u8 *data = tpa_info->data; + + if (!data) + continue; + + dma_unmap_single( + &pdev->dev, + dma_unmap_addr(tpa_info, mapping), + bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + + tpa_info->data = NULL; + + kfree(data); + } + } + + for (j = 0; j < max_idx; j++) { + struct 
bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; + u8 *data = rx_buf->data; + + if (!data) + continue; + + dma_unmap_single(&pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + + rx_buf->data = NULL; + + kfree(data); + } + + for (j = 0; j < max_agg_idx; j++) { + struct bnxt_sw_rx_agg_bd *rx_agg_buf = + &rxr->rx_agg_ring[j]; + struct page *page = rx_agg_buf->page; + + if (!page) + continue; + + dma_unmap_page(&pdev->dev, + dma_unmap_addr(rx_agg_buf, mapping), + PAGE_SIZE, PCI_DMA_FROMDEVICE); + + rx_agg_buf->page = NULL; + __clear_bit(j, rxr->rx_agg_bmap); + + __free_page(page); + } + } +} + +static void bnxt_free_skbs(struct bnxt *bp) +{ + bnxt_free_tx_skbs(bp); + bnxt_free_rx_skbs(bp); +} + +static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) +{ + struct pci_dev *pdev = bp->pdev; + int i; + + for (i = 0; i < ring->nr_pages; i++) { + if (!ring->pg_arr[i]) + continue; + + dma_free_coherent(&pdev->dev, ring->page_size, + ring->pg_arr[i], ring->dma_arr[i]); + + ring->pg_arr[i] = NULL; + } + if (ring->pg_tbl) { + dma_free_coherent(&pdev->dev, ring->nr_pages * 8, + ring->pg_tbl, ring->pg_tbl_map); + ring->pg_tbl = NULL; + } + if (ring->vmem_size && *ring->vmem) { + vfree(*ring->vmem); + *ring->vmem = NULL; + } +} + +static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) +{ + int i; + struct pci_dev *pdev = bp->pdev; + + if (ring->nr_pages > 1) { + ring->pg_tbl = dma_alloc_coherent(&pdev->dev, + ring->nr_pages * 8, + &ring->pg_tbl_map, + GFP_KERNEL); + if (!ring->pg_tbl) + return -ENOMEM; + } + + for (i = 0; i < ring->nr_pages; i++) { + ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev, + ring->page_size, + &ring->dma_arr[i], + GFP_KERNEL); + if (!ring->pg_arr[i]) + return -ENOMEM; + + if (ring->nr_pages > 1) + ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]); + } + + if (ring->vmem_size) { + *ring->vmem = vzalloc(ring->vmem_size); + if (!(*ring->vmem)) + return -ENOMEM; + } + return 0; +} + +static void bnxt_free_rx_rings(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + rxr = &bnapi->rx_ring; + + kfree(rxr->rx_tpa); + rxr->rx_tpa = NULL; + + kfree(rxr->rx_agg_bmap); + rxr->rx_agg_bmap = NULL; + + ring = &rxr->rx_ring_struct; + bnxt_free_ring(bp, ring); + + ring = &rxr->rx_agg_ring_struct; + bnxt_free_ring(bp, ring); + } +} + +static int bnxt_alloc_rx_rings(struct bnxt *bp) +{ + int i, rc, agg_rings = 0, tpa_rings = 0; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) + agg_rings = 1; + + if (bp->flags & BNXT_FLAG_TPA) + tpa_rings = 1; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + rxr = &bnapi->rx_ring; + ring = &rxr->rx_ring_struct; + + rc = bnxt_alloc_ring(bp, ring); + if (rc) + return rc; + + if (agg_rings) { + u16 mem_size; + + ring = &rxr->rx_agg_ring_struct; + rc = bnxt_alloc_ring(bp, ring); + if (rc) + return rc; + + rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; + mem_size = rxr->rx_agg_bmap_size / 8; + rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); + if (!rxr->rx_agg_bmap) + return -ENOMEM; + + if (tpa_rings) { + rxr->rx_tpa = kcalloc(MAX_TPA, + sizeof(struct bnxt_tpa_info), + GFP_KERNEL); + if (!rxr->rx_tpa) + return -ENOMEM; + } + } + } + return 0; +} + +static void 
bnxt_free_tx_rings(struct bnxt *bp) +{ + int i; + struct pci_dev *pdev = bp->pdev; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + txr = &bnapi->tx_ring; + + if (txr->tx_push) { + dma_free_coherent(&pdev->dev, bp->tx_push_size, + txr->tx_push, txr->tx_push_mapping); + txr->tx_push = NULL; + } + + ring = &txr->tx_ring_struct; + + bnxt_free_ring(bp, ring); + } +} + +static int bnxt_alloc_tx_rings(struct bnxt *bp) +{ + int i, j, rc; + struct pci_dev *pdev = bp->pdev; + + bp->tx_push_size = 0; + if (bp->tx_push_thresh) { + int push_size; + + push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + + bp->tx_push_thresh); + + if (push_size > 128) { + push_size = 0; + bp->tx_push_thresh = 0; + } + + bp->tx_push_size = push_size; + } + + for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + txr = &bnapi->tx_ring; + ring = &txr->tx_ring_struct; + + rc = bnxt_alloc_ring(bp, ring); + if (rc) + return rc; + + if (bp->tx_push_size) { + struct tx_bd *txbd; + dma_addr_t mapping; + + /* One pre-allocated DMA buffer to backup + * TX push operation + */ + txr->tx_push = dma_alloc_coherent(&pdev->dev, + bp->tx_push_size, + &txr->tx_push_mapping, + GFP_KERNEL); + + if (!txr->tx_push) + return -ENOMEM; + + txbd = &txr->tx_push->txbd1; + + mapping = txr->tx_push_mapping + + sizeof(struct tx_push_bd); + txbd->tx_bd_haddr = cpu_to_le64(mapping); + + memset(txbd + 1, 0, sizeof(struct tx_bd_ext)); + } + ring->queue_id = bp->q_info[j].queue_id; + if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) + j++; + } + return 0; +} + +static void bnxt_free_cp_rings(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + ring = &cpr->cp_ring_struct; + + bnxt_free_ring(bp, ring); + } +} + +static int bnxt_alloc_cp_rings(struct bnxt *bp) +{ + int i, rc; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + ring = &cpr->cp_ring_struct; + + rc = bnxt_alloc_ring(bp, ring); + if (rc) + return rc; + } + return 0; +} + +static void bnxt_init_ring_struct(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_tx_ring_info *txr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + ring = &cpr->cp_ring_struct; + ring->nr_pages = bp->cp_nr_pages; + ring->page_size = HW_CMPD_RING_SIZE; + ring->pg_arr = (void **)cpr->cp_desc_ring; + ring->dma_arr = cpr->cp_desc_mapping; + ring->vmem_size = 0; + + rxr = &bnapi->rx_ring; + ring = &rxr->rx_ring_struct; + ring->nr_pages = bp->rx_nr_pages; + ring->page_size = HW_RXBD_RING_SIZE; + ring->pg_arr = (void **)rxr->rx_desc_ring; + ring->dma_arr = rxr->rx_desc_mapping; + ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; + ring->vmem = (void **)&rxr->rx_buf_ring; + + ring = &rxr->rx_agg_ring_struct; + ring->nr_pages = bp->rx_agg_nr_pages; + ring->page_size = 
HW_RXBD_RING_SIZE; + ring->pg_arr = (void **)rxr->rx_agg_desc_ring; + ring->dma_arr = rxr->rx_agg_desc_mapping; + ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; + ring->vmem = (void **)&rxr->rx_agg_ring; + + txr = &bnapi->tx_ring; + ring = &txr->tx_ring_struct; + ring->nr_pages = bp->tx_nr_pages; + ring->page_size = HW_RXBD_RING_SIZE; + ring->pg_arr = (void **)txr->tx_desc_ring; + ring->dma_arr = txr->tx_desc_mapping; + ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; + ring->vmem = (void **)&txr->tx_buf_ring; + } +} + +static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) +{ + int i; + u32 prod; + struct rx_bd **rx_buf_ring; + + rx_buf_ring = (struct rx_bd **)ring->pg_arr; + for (i = 0, prod = 0; i < ring->nr_pages; i++) { + int j; + struct rx_bd *rxbd; + + rxbd = rx_buf_ring[i]; + if (!rxbd) + continue; + + for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { + rxbd->rx_bd_len_flags_type = cpu_to_le32(type); + rxbd->rx_bd_opaque = prod; + } + } +} + +static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) +{ + struct net_device *dev = bp->dev; + struct bnxt_napi *bnapi = bp->bnapi[ring_nr]; + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring_struct *ring; + u32 prod, type; + int i; + + if (!bnapi) + return -EINVAL; + + type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | + RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; + + if (NET_IP_ALIGN == 2) + type |= RX_BD_FLAGS_SOP; + + rxr = &bnapi->rx_ring; + ring = &rxr->rx_ring_struct; + bnxt_init_rxbd_pages(ring, type); + + prod = rxr->rx_prod; + for (i = 0; i < bp->rx_ring_size; i++) { + if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { + netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", + ring_nr, i, bp->rx_ring_size); + break; + } + prod = NEXT_RX(prod); + } + rxr->rx_prod = prod; + ring->fw_ring_id = INVALID_HW_RING_ID; + + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) + return 0; + + ring = &rxr->rx_agg_ring_struct; + + type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) | + RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; + + bnxt_init_rxbd_pages(ring, type); + + prod = rxr->rx_agg_prod; + for (i = 0; i < bp->rx_agg_ring_size; i++) { + if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { + netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", + ring_nr, i, bp->rx_ring_size); + break; + } + prod = NEXT_RX_AGG(prod); + } + rxr->rx_agg_prod = prod; + ring->fw_ring_id = INVALID_HW_RING_ID; + + if (bp->flags & BNXT_FLAG_TPA) { + if (rxr->rx_tpa) { + u8 *data; + dma_addr_t mapping; + + for (i = 0; i < MAX_TPA; i++) { + data = __bnxt_alloc_rx_data(bp, &mapping, + GFP_KERNEL); + if (!data) + return -ENOMEM; + + rxr->rx_tpa[i].data = data; + rxr->rx_tpa[i].mapping = mapping; + } + } else { + netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); + return -ENOMEM; + } + } + + return 0; +} + +static int bnxt_init_rx_rings(struct bnxt *bp) +{ + int i, rc = 0; + + for (i = 0; i < bp->rx_nr_rings; i++) { + rc = bnxt_init_one_rx_ring(bp, i); + if (rc) + break; + } + + return rc; +} + +static int bnxt_init_tx_rings(struct bnxt *bp) +{ + u16 i; + + bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, + MAX_SKB_FRAGS + 1); + + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + + ring->fw_ring_id = INVALID_HW_RING_ID; + } + + return 0; +} + +static void bnxt_free_ring_grps(struct bnxt *bp) +{ + kfree(bp->grp_info); + bp->grp_info = NULL; +} + +static int 
bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) +{ + int i; + + if (irq_re_init) { + bp->grp_info = kcalloc(bp->cp_nr_rings, + sizeof(struct bnxt_ring_grp_info), + GFP_KERNEL); + if (!bp->grp_info) + return -ENOMEM; + } + for (i = 0; i < bp->cp_nr_rings; i++) { + if (irq_re_init) + bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; + bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; + bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; + } + return 0; +} + +static void bnxt_free_vnics(struct bnxt *bp) +{ + kfree(bp->vnic_info); + bp->vnic_info = NULL; + bp->nr_vnics = 0; +} + +static int bnxt_alloc_vnics(struct bnxt *bp) +{ + int num_vnics = 1; + +#ifdef CONFIG_RFS_ACCEL + if (bp->flags & BNXT_FLAG_RFS) + num_vnics += bp->rx_nr_rings; +#endif + + bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), + GFP_KERNEL); + if (!bp->vnic_info) + return -ENOMEM; + + bp->nr_vnics = num_vnics; + return 0; +} + +static void bnxt_init_vnics(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + vnic->fw_vnic_id = INVALID_HW_RING_ID; + vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; + + if (bp->vnic_info[i].rss_hash_key) { + if (i == 0) + prandom_bytes(vnic->rss_hash_key, + HW_HASH_KEY_SIZE); + else + memcpy(vnic->rss_hash_key, + bp->vnic_info[0].rss_hash_key, + HW_HASH_KEY_SIZE); + } + } +} + +static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) +{ + int pages; + + pages = ring_size / desc_per_pg; + + if (!pages) + return 1; + + pages++; + + while (pages & (pages - 1)) + pages++; + + return pages; +} + +static void bnxt_set_tpa_flags(struct bnxt *bp) +{ + bp->flags &= ~BNXT_FLAG_TPA; + if (bp->dev->features & NETIF_F_LRO) + bp->flags |= BNXT_FLAG_LRO; + if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0)) + bp->flags |= BNXT_FLAG_GRO; +} + +/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must + * be set on entry. 
+ */ +void bnxt_set_ring_params(struct bnxt *bp) +{ + u32 ring_size, rx_size, rx_space; + u32 agg_factor = 0, agg_ring_size = 0; + + /* 8 for CRC and VLAN */ + rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); + + rx_space = rx_size + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; + ring_size = bp->rx_ring_size; + bp->rx_agg_ring_size = 0; + bp->rx_agg_nr_pages = 0; + + if (bp->flags & BNXT_FLAG_TPA) + agg_factor = 4; + + bp->flags &= ~BNXT_FLAG_JUMBO; + if (rx_space > PAGE_SIZE) { + u32 jumbo_factor; + + bp->flags |= BNXT_FLAG_JUMBO; + jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; + if (jumbo_factor > agg_factor) + agg_factor = jumbo_factor; + } + agg_ring_size = ring_size * agg_factor; + + if (agg_ring_size) { + bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, + RX_DESC_CNT); + if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { + u32 tmp = agg_ring_size; + + bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; + agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; + netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", + tmp, agg_ring_size); + } + bp->rx_agg_ring_size = agg_ring_size; + bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; + rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); + rx_space = rx_size + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + } + + bp->rx_buf_use_size = rx_size; + bp->rx_buf_size = rx_space; + + bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); + bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; + + ring_size = bp->tx_ring_size; + bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); + bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; + + ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; + bp->cp_ring_size = ring_size; + + bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); + if (bp->cp_nr_pages > MAX_CP_PAGES) { + bp->cp_nr_pages = MAX_CP_PAGES; + bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; + netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", + ring_size, bp->cp_ring_size); + } + bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; + bp->cp_ring_mask = bp->cp_bit - 1; +} + +static void bnxt_free_vnic_attributes(struct bnxt *bp) +{ + int i; + struct bnxt_vnic_info *vnic; + struct pci_dev *pdev = bp->pdev; + + if (!bp->vnic_info) + return; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + + kfree(vnic->fw_grp_ids); + vnic->fw_grp_ids = NULL; + + kfree(vnic->uc_list); + vnic->uc_list = NULL; + + if (vnic->mc_list) { + dma_free_coherent(&pdev->dev, vnic->mc_list_size, + vnic->mc_list, vnic->mc_list_mapping); + vnic->mc_list = NULL; + } + + if (vnic->rss_table) { + dma_free_coherent(&pdev->dev, PAGE_SIZE, + vnic->rss_table, + vnic->rss_table_dma_addr); + vnic->rss_table = NULL; + } + + vnic->rss_hash_key = NULL; + vnic->flags = 0; + } +} + +static int bnxt_alloc_vnic_attributes(struct bnxt *bp) +{ + int i, rc = 0, size; + struct bnxt_vnic_info *vnic; + struct pci_dev *pdev = bp->pdev; + int max_rings; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + + if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { + int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; + + if (mem_size > 0) { + vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); + if (!vnic->uc_list) { + rc = -ENOMEM; + goto out; + } + } + } + + if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { + vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; + vnic->mc_list = + 
dma_alloc_coherent(&pdev->dev, + vnic->mc_list_size, + &vnic->mc_list_mapping, + GFP_KERNEL); + if (!vnic->mc_list) { + rc = -ENOMEM; + goto out; + } + } + + if (vnic->flags & BNXT_VNIC_RSS_FLAG) + max_rings = bp->rx_nr_rings; + else + max_rings = 1; + + vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); + if (!vnic->fw_grp_ids) { + rc = -ENOMEM; + goto out; + } + + /* Allocate rss table and hash key */ + vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &vnic->rss_table_dma_addr, + GFP_KERNEL); + if (!vnic->rss_table) { + rc = -ENOMEM; + goto out; + } + + size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); + + vnic->rss_hash_key = ((void *)vnic->rss_table) + size; + vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; + } + return 0; + +out: + return rc; +} + +static void bnxt_free_hwrm_resources(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, + bp->hwrm_cmd_resp_dma_addr); + + bp->hwrm_cmd_resp_addr = NULL; + if (bp->hwrm_dbg_resp_addr) { + dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE, + bp->hwrm_dbg_resp_addr, + bp->hwrm_dbg_resp_dma_addr); + + bp->hwrm_dbg_resp_addr = NULL; + } +} + +static int bnxt_alloc_hwrm_resources(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &bp->hwrm_cmd_resp_dma_addr, + GFP_KERNEL); + if (!bp->hwrm_cmd_resp_addr) + return -ENOMEM; + bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, + HWRM_DBG_REG_BUF_SIZE, + &bp->hwrm_dbg_resp_dma_addr, + GFP_KERNEL); + if (!bp->hwrm_dbg_resp_addr) + netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); + + return 0; +} + +static void bnxt_free_stats(struct bnxt *bp) +{ + u32 size, i; + struct pci_dev *pdev = bp->pdev; + + if (!bp->bnapi) + return; + + size = sizeof(struct ctx_hw_stats); + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + if (cpr->hw_stats) { + dma_free_coherent(&pdev->dev, size, cpr->hw_stats, + cpr->hw_stats_map); + cpr->hw_stats = NULL; + } + } +} + +static int bnxt_alloc_stats(struct bnxt *bp) +{ + u32 size, i; + struct pci_dev *pdev = bp->pdev; + + size = sizeof(struct ctx_hw_stats); + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, + &cpr->hw_stats_map, + GFP_KERNEL); + if (!cpr->hw_stats) + return -ENOMEM; + + cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; + } + return 0; +} + +static void bnxt_clear_ring_indices(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_tx_ring_info *txr; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + cpr->cp_raw_cons = 0; + + txr = &bnapi->tx_ring; + txr->tx_prod = 0; + txr->tx_cons = 0; + + rxr = &bnapi->rx_ring; + rxr->rx_prod = 0; + rxr->rx_agg_prod = 0; + rxr->rx_sw_agg_prod = 0; + } +} + +static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) +{ +#ifdef CONFIG_RFS_ACCEL + int i; + + /* Under rtnl_lock and all our NAPIs have been disabled. It's + * safe to delete the hash table. 
+ */ + for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + struct hlist_node *tmp; + struct bnxt_ntuple_filter *fltr; + + head = &bp->ntp_fltr_hash_tbl[i]; + hlist_for_each_entry_safe(fltr, tmp, head, hash) { + hlist_del(&fltr->hash); + kfree(fltr); + } + } + if (irq_reinit) { + kfree(bp->ntp_fltr_bmap); + bp->ntp_fltr_bmap = NULL; + } + bp->ntp_fltr_count = 0; +#endif +} + +static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) +{ +#ifdef CONFIG_RFS_ACCEL + int i, rc = 0; + + if (!(bp->flags & BNXT_FLAG_RFS)) + return 0; + + for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) + INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); + + bp->ntp_fltr_count = 0; + bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), + GFP_KERNEL); + + if (!bp->ntp_fltr_bmap) + rc = -ENOMEM; + + return rc; +#else + return 0; +#endif +} + +static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) +{ + bnxt_free_vnic_attributes(bp); + bnxt_free_tx_rings(bp); + bnxt_free_rx_rings(bp); + bnxt_free_cp_rings(bp); + bnxt_free_ntp_fltrs(bp, irq_re_init); + if (irq_re_init) { + bnxt_free_stats(bp); + bnxt_free_ring_grps(bp); + bnxt_free_vnics(bp); + kfree(bp->bnapi); + bp->bnapi = NULL; + } else { + bnxt_clear_ring_indices(bp); + } +} + +static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) +{ + int i, rc, size, arr_size; + void *bnapi; + + if (irq_re_init) { + /* Allocate bnapi mem pointer array and mem block for + * all queues + */ + arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * + bp->cp_nr_rings); + size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); + bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); + if (!bnapi) + return -ENOMEM; + + bp->bnapi = bnapi; + bnapi += arr_size; + for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { + bp->bnapi[i] = bnapi; + bp->bnapi[i]->index = i; + bp->bnapi[i]->bp = bp; + } + + rc = bnxt_alloc_stats(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_ntp_fltrs(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_vnics(bp); + if (rc) + goto alloc_mem_err; + } + + bnxt_init_ring_struct(bp); + + rc = bnxt_alloc_rx_rings(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_tx_rings(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_cp_rings(bp); + if (rc) + goto alloc_mem_err; + + bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | + BNXT_VNIC_UCAST_FLAG; + rc = bnxt_alloc_vnic_attributes(bp); + if (rc) + goto alloc_mem_err; + return 0; + +alloc_mem_err: + bnxt_free_mem(bp, true); + return rc; +} + +void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, + u16 cmpl_ring, u16 target_id) +{ + struct hwrm_cmd_req_hdr *req = request; + + req->cmpl_ring_req_type = + cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT)); + req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT); + req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); +} + +int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) +{ + int i, intr_process, rc; + struct hwrm_cmd_req_hdr *req = msg; + u32 *data = msg; + __le32 *resp_len, *valid; + u16 cp_ring_id, len = 0; + struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; + + req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++); + memset(resp, 0, PAGE_SIZE); + cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) & + HWRM_CMPL_RING_MASK) >> + HWRM_CMPL_RING_SFT; + intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 
0 : 1; + + /* Write request msg to hwrm channel */ + __iowrite32_copy(bp->bar0, data, msg_len / 4); + + /* currently supports only one outstanding message */ + if (intr_process) + bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) & + HWRM_SEQ_ID_MASK; + + /* Ring channel doorbell */ + writel(1, bp->bar0 + 0x100); + + i = 0; + if (intr_process) { + /* Wait until hwrm response cmpl interrupt is processed */ + while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && + i++ < timeout) { + usleep_range(600, 800); + } + + if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { + netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", + req->cmpl_ring_req_type); + return -1; + } + } else { + /* Check if response len is updated */ + resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; + for (i = 0; i < timeout; i++) { + len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> + HWRM_RESP_LEN_SFT; + if (len) + break; + usleep_range(600, 800); + } + + if (i >= timeout) { + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", + timeout, req->cmpl_ring_req_type, + req->target_id_seq_id, *resp_len); + return -1; + } + + /* Last word of resp contains valid bit */ + valid = bp->hwrm_cmd_resp_addr + len - 4; + for (i = 0; i < timeout; i++) { + if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK) + break; + usleep_range(600, 800); + } + + if (i >= timeout) { + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", + timeout, req->cmpl_ring_req_type, + req->target_id_seq_id, len, *valid); + return -1; + } + } + + rc = le16_to_cpu(resp->error_code); + if (rc) { + netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", + le16_to_cpu(resp->req_type), + le16_to_cpu(resp->seq_id), rc); + return rc; + } + return 0; +} + +int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) +{ + int rc; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, msg, msg_len, timeout); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) +{ + struct hwrm_func_drv_rgtr_input req = {0}; + int i; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); + + req.enables = + cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | + FUNC_DRV_RGTR_REQ_ENABLES_VER | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + + /* TODO: current async event fwd bits are not defined and the firmware + * only checks if it is non-zero to enable async event forwarding + */ + req.async_event_fwd[0] |= cpu_to_le32(1); + req.os_type = cpu_to_le16(1); + req.ver_maj = DRV_VER_MAJ; + req.ver_min = DRV_VER_MIN; + req.ver_upd = DRV_VER_UPD; + + if (BNXT_PF(bp)) { + unsigned long vf_req_snif_bmap[4]; + u32 *data = (u32 *)vf_req_snif_bmap; + + memset(vf_req_snif_bmap, 0, 32); + for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) + __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); + + for (i = 0; i < 8; i++) { + req.vf_req_fwd[i] = cpu_to_le32(*data); + data++; + } + req.enables |= + cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); + } + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) +{ + u32 rc = 0; + struct hwrm_tunnel_dst_port_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); + req.tunnel_type = tunnel_type; + + switch (tunnel_type) { + case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: + req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; + break; + case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: + 
req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; + break; + default: + break; + } + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", + rc); + return rc; +} + +static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, + u8 tunnel_type) +{ + u32 rc = 0; + struct hwrm_tunnel_dst_port_alloc_input req = {0}; + struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); + + req.tunnel_type = tunnel_type; + req.tunnel_dst_port_val = port; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", + rc); + goto err_out; + } + + if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN) + bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; + + else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE) + bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; +err_out: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) +{ + struct hwrm_cfa_l2_set_rx_mask_input req = {0}; + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); + req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id); + + req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + req.mask = cpu_to_le32(vnic->rx_mask); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +#ifdef CONFIG_RFS_ACCEL +static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr) +{ + struct hwrm_cfa_ntuple_filter_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); + req.ntuple_filter_id = fltr->filter_id; + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +#define BNXT_NTP_FLTR_FLAGS \ + (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID) + +static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr) +{ + int rc = 0; + struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; + struct hwrm_cfa_ntuple_filter_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + struct flow_keys *keys = &fltr->fkeys; + struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); + req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0]; + + req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); + + req.ethertype = htons(ETH_P_IP); + memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); + req.ipaddr_type = 4; + req.ip_protocol = keys->basic.ip_proto; + + 
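+	/* Exact-match the IPv4 4-tuple taken from the flow keys: all-ones address and port masks. */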
req.src_ipaddr[0] = keys->addrs.v4addrs.src; + req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + + req.src_port = keys->ports.src; + req.src_port_mask = cpu_to_be16(0xffff); + req.dst_port = keys->ports.dst; + req.dst_port_mask = cpu_to_be16(0xffff); + + req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + fltr->filter_id = resp->ntuple_filter_id; + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} +#endif + +static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, + u8 *mac_addr) +{ + u32 rc = 0; + struct hwrm_cfa_l2_filter_alloc_input req = {0}; + struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); + req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX | + CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); + req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); + req.enables = + cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); + memcpy(req.l2_addr, mac_addr, ETH_ALEN); + req.l2_addr_mask[0] = 0xff; + req.l2_addr_mask[1] = 0xff; + req.l2_addr_mask[2] = 0xff; + req.l2_addr_mask[3] = 0xff; + req.l2_addr_mask[4] = 0xff; + req.l2_addr_mask[5] = 0xff; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = + resp->l2_filter_id; + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) +{ + u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ + int rc = 0; + + /* Any associated ntuple filters will also be cleared by firmware. */ + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < num_of_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + for (j = 0; j < vnic->uc_filter_count; j++) { + struct hwrm_cfa_l2_filter_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, + HWRM_CFA_L2_FILTER_FREE, -1, -1); + + req.l2_filter_id = vnic->fw_l2_filter_id[j]; + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + } + vnic->uc_filter_count = 0; + } + mutex_unlock(&bp->hwrm_cmd_lock); + + return rc; +} + +static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_tpa_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); + + if (tpa_flags) { + u16 mss = bp->dev->mtu - 40; + u32 nsegs, n, segs = 0, flags; + + flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | + VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | + VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | + VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | + VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; + if (tpa_flags & BNXT_FLAG_GRO) + flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; + + req.flags = cpu_to_le32(flags); + + req.enables = + cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | + VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS); + + /* Number of segs are log2 units, and first packet is not + * included as part of this units. 
+ */ + if (mss <= PAGE_SIZE) { + n = PAGE_SIZE / mss; + nsegs = (MAX_SKB_FRAGS - 1) * n; + } else { + n = mss / PAGE_SIZE; + if (mss & (PAGE_SIZE - 1)) + n++; + nsegs = (MAX_SKB_FRAGS - n) / n; + } + + segs = ilog2(nsegs); + req.max_agg_segs = cpu_to_le16(segs); + req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); + } + req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) +{ + u32 i, j, max_rings; + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_rss_cfg_input req = {0}; + + if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); + if (set_rss) { + vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 | + BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 | + BNXT_RSS_HASH_TYPE_FLAG_IPV6 | + BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6; + + req.hash_type = cpu_to_le32(vnic->hash_type); + + if (vnic->flags & BNXT_VNIC_RSS_FLAG) + max_rings = bp->rx_nr_rings; + else + max_rings = 1; + + /* Fill the RSS indirection table with ring group ids */ + for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { + if (j == max_rings) + j = 0; + vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); + } + + req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); + req.hash_key_tbl_addr = + cpu_to_le64(vnic->rss_hash_key_dma_addr); + } + req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_plcmodes_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); + req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); + req.enables = + cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | + VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); + /* thresholds not implemented in firmware yet */ + req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); + req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); + req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id) +{ + struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); + req.rss_cos_lb_ctx_id = + cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx); + + hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; +} + +static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, i); + } + bp->rsscos_nr_ctxs = 0; +} + +static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id) +{ + int rc; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, + -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + 
bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = + le16_to_cpu(resp->rss_cos_lb_ctx_id); + mutex_unlock(&bp->hwrm_cmd_lock); + + return rc; +} + +static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) +{ + int grp_idx = 0; + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); + /* Only RSS support for now TBD: COS & LB */ + req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP | + VNIC_CFG_REQ_ENABLES_RSS_RULE); + req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); + req.cos_rule = cpu_to_le16(0xffff); + if (vnic->flags & BNXT_VNIC_RSS_FLAG) + grp_idx = 0; + else if (vnic->flags & BNXT_VNIC_RFS_FLAG) + grp_idx = vnic_id - 1; + + req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); + + req.lb_rule = cpu_to_le16(0xffff); + req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN); + + if (bp->flags & BNXT_FLAG_STRIP_VLAN) + req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) +{ + u32 rc = 0; + + if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { + struct hwrm_vnic_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); + req.vnic_id = + cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return rc; + bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; + } + return rc; +} + +static void bnxt_hwrm_vnic_free(struct bnxt *bp) +{ + u16 i; + + for (i = 0; i < bp->nr_vnics; i++) + bnxt_hwrm_vnic_free_one(bp, i); +} + +static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id, + u16 end_grp_id) +{ + u32 rc = 0, i, j; + struct hwrm_vnic_alloc_input req = {0}; + struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + /* map ring groups to this vnic */ + for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) { + if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) { + netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", + j, (end_grp_id - start_grp_id)); + break; + } + bp->vnic_info[vnic_id].fw_grp_ids[j] = + bp->grp_info[i].fw_grp_id; + } + + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + if (vnic_id == 0) + req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) +{ + u16 i; + u32 rc = 0; + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < bp->rx_nr_rings; i++) { + struct hwrm_ring_grp_alloc_input req = {0}; + struct hwrm_ring_grp_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); + + req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); + req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id); + req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id); + req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx); + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + + bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id); + } + 
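/*
 * Editorial aside -- an illustrative sketch, not part of this patch.  The
 * VNIC TPA configuration a few hunks above converts the MSS into the
 * firmware's "max aggregation segments" value, which is expressed in log2
 * units.  The standalone rendering below assumes 4096-byte pages and 17
 * SKB fragments just to make the sketch self-contained; the driver uses
 * PAGE_SIZE and MAX_SKB_FRAGS.
 */
#include <stdio.h>

#define EX_PAGE_SIZE	4096u
#define EX_MAX_FRAGS	17u

static unsigned int example_max_agg_segs_log2(unsigned int mss)
{
	unsigned int n, nsegs, log2 = 0;

	if (mss <= EX_PAGE_SIZE) {
		n = EX_PAGE_SIZE / mss;		/* MSS-sized chunks per page */
		nsegs = (EX_MAX_FRAGS - 1) * n;
	} else {
		n = (mss + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;	/* pages per MSS */
		nsegs = (EX_MAX_FRAGS - n) / n;
	}

	while (nsegs >>= 1)			/* open-coded ilog2() */
		log2++;
	return log2;
}

int main(void)
{
	/* MTU 1500 => MSS 1460 => 32 segments => log2 value of 5 */
	printf("max_agg_segs (log2) = %u\n", example_max_agg_segs_log2(1500 - 40));
	return 0;
}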
mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) +{ + u16 i; + u32 rc = 0; + struct hwrm_ring_grp_free_input req = {0}; + + if (!bp->grp_info) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < bp->cp_nr_rings; i++) { + if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) + continue; + req.ring_group_id = + cpu_to_le32(bp->grp_info[i].fw_grp_id); + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int hwrm_ring_alloc_send_msg(struct bnxt *bp, + struct bnxt_ring_struct *ring, + u32 ring_type, u32 map_index, + u32 stats_ctx_id) +{ + int rc = 0, err = 0; + struct hwrm_ring_alloc_input req = {0}; + struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + u16 ring_id; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); + + req.enables = 0; + if (ring->nr_pages > 1) { + req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map); + /* Page size is in log2 units */ + req.page_size = BNXT_PAGE_SHIFT; + req.page_tbl_depth = 1; + } else { + req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]); + } + req.fbo = 0; + /* Association of ring index with doorbell index and MSIX number */ + req.logical_id = cpu_to_le16(map_index); + + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; + /* Association of transmit ring with completion ring */ + req.cmpl_ring_id = + cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); + req.length = cpu_to_le32(bp->tx_ring_mask + 1); + req.stat_ctx_id = cpu_to_le32(stats_ctx_id); + req.queue_id = cpu_to_le16(ring->queue_id); + break; + case HWRM_RING_ALLOC_RX: + req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req.length = cpu_to_le32(bp->rx_ring_mask + 1); + break; + case HWRM_RING_ALLOC_AGG: + req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); + break; + case HWRM_RING_ALLOC_CMPL: + req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL; + req.length = cpu_to_le32(bp->cp_ring_mask + 1); + if (bp->flags & BNXT_FLAG_USING_MSIX) + req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + break; + default: + netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", + ring_type); + return -1; + } + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + err = le16_to_cpu(resp->error_code); + ring_id = le16_to_cpu(resp->ring_id); + mutex_unlock(&bp->hwrm_cmd_lock); + + if (rc || err) { + switch (ring_type) { + case RING_FREE_REQ_RING_TYPE_CMPL: + netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", + rc, err); + return -1; + + case RING_FREE_REQ_RING_TYPE_RX: + netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", + rc, err); + return -1; + + case RING_FREE_REQ_RING_TYPE_TX: + netdev_err(bp->dev, "hwrm_ring_alloc tx failed. 
rc:%x err:%x\n", + rc, err); + return -1; + + default: + netdev_err(bp->dev, "Invalid ring\n"); + return -1; + } + } + ring->fw_ring_id = ring_id; + return rc; +} + +static int bnxt_hwrm_ring_alloc(struct bnxt *bp) +{ + int i, rc = 0; + + if (bp->cp_nr_rings) { + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + rc = hwrm_ring_alloc_send_msg(bp, ring, + HWRM_RING_ALLOC_CMPL, i, + INVALID_STATS_CTX_ID); + if (rc) + goto err_out; + cpr->cp_doorbell = bp->bar1 + i * 0x80; + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; + } + } + + if (bp->tx_nr_rings) { + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx; + + rc = hwrm_ring_alloc_send_msg(bp, ring, + HWRM_RING_ALLOC_TX, i, + fw_stats_ctx); + if (rc) + goto err_out; + txr->tx_doorbell = bp->bar1 + i * 0x80; + } + } + + if (bp->rx_nr_rings) { + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + + rc = hwrm_ring_alloc_send_msg(bp, ring, + HWRM_RING_ALLOC_RX, i, + INVALID_STATS_CTX_ID); + if (rc) + goto err_out; + rxr->rx_doorbell = bp->bar1 + i * 0x80; + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id; + } + } + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_ring_struct *ring = + &rxr->rx_agg_ring_struct; + + rc = hwrm_ring_alloc_send_msg(bp, ring, + HWRM_RING_ALLOC_AGG, + bp->rx_nr_rings + i, + INVALID_STATS_CTX_ID); + if (rc) + goto err_out; + + rxr->rx_agg_doorbell = + bp->bar1 + (bp->rx_nr_rings + i) * 0x80; + writel(DB_KEY_RX | rxr->rx_agg_prod, + rxr->rx_agg_doorbell); + bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id; + } + } +err_out: + return rc; +} + +static int hwrm_ring_free_send_msg(struct bnxt *bp, + struct bnxt_ring_struct *ring, + u32 ring_type, int cmpl_ring_id) +{ + int rc; + struct hwrm_ring_free_input req = {0}; + struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; + u16 error_code; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1); + req.ring_type = ring_type; + req.ring_id = cpu_to_le16(ring->fw_ring_id); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + error_code = le16_to_cpu(resp->error_code); + mutex_unlock(&bp->hwrm_cmd_lock); + + if (rc || error_code) { + switch (ring_type) { + case RING_FREE_REQ_RING_TYPE_CMPL: + netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", + rc); + return rc; + case RING_FREE_REQ_RING_TYPE_RX: + netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", + rc); + return rc; + case RING_FREE_REQ_RING_TYPE_TX: + netdev_err(bp->dev, "hwrm_ring_free tx failed. 
rc:%d\n", + rc); + return rc; + default: + netdev_err(bp->dev, "Invalid ring\n"); + return -1; + } + } + return 0; +} + +static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) +{ + int i, rc = 0; + + if (!bp->bnapi) + return 0; + + if (bp->tx_nr_rings) { + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr = &bnapi->tx_ring; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg( + bp, ring, + RING_FREE_REQ_RING_TYPE_TX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + } + } + } + + if (bp->rx_nr_rings) { + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg( + bp, ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].rx_fw_ring_id = + INVALID_HW_RING_ID; + } + } + } + + if (bp->rx_agg_nr_pages) { + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnxt_ring_struct *ring = + &rxr->rx_agg_ring_struct; + u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg( + bp, ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].agg_fw_ring_id = + INVALID_HW_RING_ID; + } + } + } + + if (bp->cp_nr_rings) { + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg( + bp, ring, + RING_FREE_REQ_RING_TYPE_CMPL, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].cp_fw_ring_id = + INVALID_HW_RING_ID; + } + } + } + + return rc; +} + +int bnxt_hwrm_set_coal(struct bnxt *bp) +{ + int i, rc = 0; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + u16 max_buf, max_buf_irq; + u16 buf_tmr, buf_tmr_irq; + u32 flags; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, + -1, -1); + + /* Each rx completion (2 records) should be DMAed immediately */ + max_buf = min_t(u16, bp->coal_bufs / 4, 2); + /* max_buf must not be zero */ + max_buf = clamp_t(u16, max_buf, 1, 63); + max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63); + buf_tmr = max_t(u16, bp->coal_ticks / 4, 1); + buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1); + + flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + + /* RING_IDLE generates more IRQs for lower latency. Enable it only + * if coal_ticks is less than 25 us. 
+ */ + if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25) + flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; + + req.flags = cpu_to_le16(flags); + req.num_cmpl_dma_aggr = cpu_to_le16(max_buf); + req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq); + req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr); + req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq); + req.int_lat_tmr_min = cpu_to_le16(buf_tmr); + req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks); + req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs); + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < bp->cp_nr_rings; i++) { + req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) +{ + int rc = 0, i; + struct hwrm_stat_ctx_free_input req = {0}; + + if (!bp->bnapi) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { + req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + + cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; + } + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) +{ + int rc = 0, i; + struct hwrm_stat_ctx_alloc_input req = {0}; + struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); + + req.update_period_ms = cpu_to_le32(1000); + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + + cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); + + bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return 0; +} + +static int bnxt_hwrm_func_qcaps(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_func_qcaps_input req = {0}; + struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); + req.fid = cpu_to_le16(0xffff); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto hwrm_func_qcaps_exit; + + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + pf->fw_fid = le16_to_cpu(resp->fid); + pf->port_id = le16_to_cpu(resp->port_id); + memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); + pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); + pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); + pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); + pf->max_pf_tx_rings = pf->max_tx_rings; + pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); + pf->max_pf_rx_rings = pf->max_rx_rings; + pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); + pf->max_vnics = le16_to_cpu(resp->max_vnics); + pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); + pf->first_vf_id = le16_to_cpu(resp->first_vf_id); + pf->max_vfs = le16_to_cpu(resp->max_vfs); + pf->max_encap_records = le32_to_cpu(resp->max_encap_records); + 
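/*
 * Editorial aside -- an illustrative sketch, not part of this patch.
 * bnxt_hwrm_set_coal() above turns the driver's coalescing settings into the
 * firmware parameters with a simple recipe: DMA a small batch of completions
 * well before the full budget or timer expires (a quarter of the buffer
 * budget, capped at 2 and clamped to the 1..63 range the firmware accepts),
 * and turn on the low-latency RING_IDLE mode only when the timer is under
 * 25 us.  The standalone sketch below uses illustrative names and takes the
 * timer directly in microseconds.
 */
#include <stdint.h>

struct example_coal_params {
	uint16_t dma_batch;	/* maps to num_cmpl_dma_aggr */
	uint16_t timer_max;	/* maps to int_lat_tmr_max */
	int	 ring_idle;
};

static uint16_t example_clamp16(uint32_t v, uint16_t lo, uint16_t hi)
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return (uint16_t)v;
}

static struct example_coal_params example_derive_coal(uint32_t coal_bufs,
						      uint32_t coal_usec)
{
	struct example_coal_params p;
	uint32_t batch = coal_bufs / 4;

	if (batch > 2)			/* push each rx completion out quickly */
		batch = 2;
	p.dma_batch = example_clamp16(batch, 1, 63);
	p.timer_max = example_clamp16(coal_usec, 1, 0xffff);
	p.ring_idle = coal_usec < 25;	/* more IRQs, lower latency */
	return p;
}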
pf->max_decap_records = le32_to_cpu(resp->max_decap_records); + pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); + pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); + pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); + pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); + } else { +#ifdef CONFIG_BNXT_SRIOV + struct bnxt_vf_info *vf = &bp->vf; + + vf->fw_fid = le16_to_cpu(resp->fid); + memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); + if (!is_valid_ether_addr(vf->mac_addr)) + random_ether_addr(vf->mac_addr); + + vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); + vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); + vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); + vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); + vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); + vf->max_vnics = le16_to_cpu(resp->max_vnics); + vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); +#endif + } + + bp->tx_push_thresh = 0; + if (resp->flags & + cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)) + bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; + +hwrm_func_qcaps_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_func_reset(struct bnxt *bp) +{ + struct hwrm_func_reset_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); + req.enables = 0; + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); +} + +static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_queue_qportcfg_input req = {0}; + struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; + u8 i, *qptr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto qportcfg_exit; + + if (!resp->max_configurable_queues) { + rc = -EINVAL; + goto qportcfg_exit; + } + bp->max_tc = resp->max_configurable_queues; + if (bp->max_tc > BNXT_MAX_QUEUE) + bp->max_tc = BNXT_MAX_QUEUE; + + qptr = &resp->queue_id0; + for (i = 0; i < bp->max_tc; i++) { + bp->q_info[i].queue_id = *qptr++; + bp->q_info[i].queue_profile = *qptr++; + } + +qportcfg_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_ver_get(struct bnxt *bp) +{ + int rc; + struct hwrm_ver_get_input req = {0}; + struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); + req.hwrm_intf_maj = HWRM_VERSION_MAJOR; + req.hwrm_intf_min = HWRM_VERSION_MINOR; + req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto hwrm_ver_get_exit; + + memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); + + if (req.hwrm_intf_maj != resp->hwrm_intf_maj || + req.hwrm_intf_min != resp->hwrm_intf_min || + req.hwrm_intf_upd != resp->hwrm_intf_upd) { + netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n", + resp->hwrm_intf_maj, resp->hwrm_intf_min, + resp->hwrm_intf_upd, req.hwrm_intf_maj, + req.hwrm_intf_min, req.hwrm_intf_upd); + netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n"); + } + snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d", + resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, + resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd); + +hwrm_ver_get_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static void 
bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) +{ + if (bp->vxlan_port_cnt) { + bnxt_hwrm_tunnel_dst_port_free( + bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); + } + bp->vxlan_port_cnt = 0; + if (bp->nge_port_cnt) { + bnxt_hwrm_tunnel_dst_port_free( + bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); + } + bp->nge_port_cnt = 0; +} + +static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) +{ + int rc, i; + u32 tpa_flags = 0; + + if (set_tpa) + tpa_flags = bp->flags & BNXT_FLAG_TPA; + for (i = 0; i < bp->nr_vnics; i++) { + rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); + if (rc) { + netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", + rc, i); + return rc; + } + } + return 0; +} + +static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->nr_vnics; i++) + bnxt_hwrm_vnic_set_rss(bp, i, false); +} + +static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, + bool irq_re_init) +{ + if (bp->vnic_info) { + bnxt_hwrm_clear_vnic_filter(bp); + /* clear all RSS setting before free vnic ctx */ + bnxt_hwrm_clear_vnic_rss(bp); + bnxt_hwrm_vnic_ctx_free(bp); + /* before free the vnic, undo the vnic tpa settings */ + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, false); + bnxt_hwrm_vnic_free(bp); + } + bnxt_hwrm_ring_free(bp, close_path); + bnxt_hwrm_ring_grp_free(bp); + if (irq_re_init) { + bnxt_hwrm_stat_ctx_free(bp); + bnxt_hwrm_free_tunnel_ports(bp); + } +} + +static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) +{ + int rc; + + /* allocate context for vnic */ + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", + vnic_id, rc); + goto vnic_setup_err; + } + bp->rsscos_nr_ctxs++; + + /* configure default vnic, ring grp */ + rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", + vnic_id, rc); + goto vnic_setup_err; + } + + /* Enable RSS hashing on vnic */ + rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", + vnic_id, rc); + goto vnic_setup_err; + } + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", + vnic_id, rc); + } + } + +vnic_setup_err: + return rc; +} + +static int bnxt_alloc_rfs_vnics(struct bnxt *bp) +{ +#ifdef CONFIG_RFS_ACCEL + int i, rc = 0; + + for (i = 0; i < bp->rx_nr_rings; i++) { + u16 vnic_id = i + 1; + u16 ring_id = i; + + if (vnic_id >= bp->nr_vnics) + break; + + bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG; + rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", + vnic_id, rc); + break; + } + rc = bnxt_setup_vnic(bp, vnic_id); + if (rc) + break; + } + return rc; +#else + return 0; +#endif +} + +static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) +{ + int rc = 0; + + if (irq_re_init) { + rc = bnxt_hwrm_stat_ctx_alloc(bp); + if (rc) { + netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", + rc); + goto err_out; + } + } + + rc = bnxt_hwrm_ring_alloc(bp); + if (rc) { + netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_hwrm_ring_grp_alloc(bp); + if (rc) { + netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); + goto err_out; + } + + /* default vnic 0 */ + rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings); + if (rc) { + netdev_err(bp->dev, "hwrm vnic alloc failure 
rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_setup_vnic(bp, 0); + if (rc) + goto err_out; + + if (bp->flags & BNXT_FLAG_RFS) { + rc = bnxt_alloc_rfs_vnics(bp); + if (rc) + goto err_out; + } + + if (bp->flags & BNXT_FLAG_TPA) { + rc = bnxt_set_tpa(bp, true); + if (rc) + goto err_out; + } + + if (BNXT_VF(bp)) + bnxt_update_vf_mac(bp); + + /* Filter for default vnic 0 */ + rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); + if (rc) { + netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); + goto err_out; + } + bp->vnic_info[0].uc_filter_count = 1; + + bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST | + CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + + if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) + bp->vnic_info[0].rx_mask |= + CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + if (rc) { + netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_hwrm_set_coal(bp); + if (rc) + netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", + rc); + + return 0; + +err_out: + bnxt_hwrm_resource_free(bp, 0, true); + + return rc; +} + +static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) +{ + bnxt_hwrm_resource_free(bp, 1, irq_re_init); + return 0; +} + +static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) +{ + bnxt_init_rx_rings(bp); + bnxt_init_tx_rings(bp); + bnxt_init_ring_grps(bp, irq_re_init); + bnxt_init_vnics(bp); + + return bnxt_init_chip(bp, irq_re_init); +} + +static void bnxt_disable_int(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + } +} + +static void bnxt_enable_int(struct bnxt *bp) +{ + int i; + + atomic_set(&bp->intr_sem, 0); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + } +} + +static int bnxt_set_real_num_queues(struct bnxt *bp) +{ + int rc; + struct net_device *dev = bp->dev; + + rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); + if (rc) + return rc; + + rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); + if (rc) + return rc; + +#ifdef CONFIG_RFS_ACCEL + if (bp->rx_nr_rings) + dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); + if (!dev->rx_cpu_rmap) + rc = -ENOMEM; +#endif + + return rc; +} + +static int bnxt_setup_msix(struct bnxt *bp) +{ + struct msix_entry *msix_ent; + struct net_device *dev = bp->dev; + int i, total_vecs, rc = 0; + const int len = sizeof(bp->irq_tbl[0].name); + + bp->flags &= ~BNXT_FLAG_USING_MSIX; + total_vecs = bp->cp_nr_rings; + + msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); + if (!msix_ent) + return -ENOMEM; + + for (i = 0; i < total_vecs; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs); + if (total_vecs < 0) { + rc = -ENODEV; + goto msix_setup_exit; + } + + bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); + if (bp->irq_tbl) { + int tcs; + + /* Trim rings based upon num of vectors allocated */ + bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings); + bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings); + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + tcs = netdev_get_num_tc(dev); + if (tcs > 1) { + 
bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs; + if (bp->tx_nr_rings_per_tc == 0) { + netdev_reset_tc(dev); + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + } else { + int i, off, count; + + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; + for (i = 0; i < tcs; i++) { + count = bp->tx_nr_rings_per_tc; + off = i * count; + netdev_set_tc_queue(dev, i, count, off); + } + } + } + bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); + + for (i = 0; i < bp->cp_nr_rings; i++) { + bp->irq_tbl[i].vector = msix_ent[i].vector; + snprintf(bp->irq_tbl[i].name, len, + "%s-%s-%d", dev->name, "TxRx", i); + bp->irq_tbl[i].handler = bnxt_msix; + } + rc = bnxt_set_real_num_queues(bp); + if (rc) + goto msix_setup_exit; + } else { + rc = -ENOMEM; + goto msix_setup_exit; + } + bp->flags |= BNXT_FLAG_USING_MSIX; + kfree(msix_ent); + return 0; + +msix_setup_exit: + netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc); + pci_disable_msix(bp->pdev); + kfree(msix_ent); + return rc; +} + +static int bnxt_setup_inta(struct bnxt *bp) +{ + int rc; + const int len = sizeof(bp->irq_tbl[0].name); + + if (netdev_get_num_tc(bp->dev)) + netdev_reset_tc(bp->dev); + + bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); + if (!bp->irq_tbl) { + rc = -ENOMEM; + return rc; + } + bp->rx_nr_rings = 1; + bp->tx_nr_rings = 1; + bp->cp_nr_rings = 1; + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bp->irq_tbl[0].vector = bp->pdev->irq; + snprintf(bp->irq_tbl[0].name, len, + "%s-%s-%d", bp->dev->name, "TxRx", 0); + bp->irq_tbl[0].handler = bnxt_inta; + rc = bnxt_set_real_num_queues(bp); + return rc; +} + +static int bnxt_setup_int_mode(struct bnxt *bp) +{ + int rc = 0; + + if (bp->flags & BNXT_FLAG_MSIX_CAP) + rc = bnxt_setup_msix(bp); + + if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { + /* fallback to INTA */ + rc = bnxt_setup_inta(bp); + } + return rc; +} + +static void bnxt_free_irq(struct bnxt *bp) +{ + struct bnxt_irq *irq; + int i; + +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); + bp->dev->rx_cpu_rmap = NULL; +#endif + if (!bp->irq_tbl) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + irq = &bp->irq_tbl[i]; + if (irq->requested) + free_irq(irq->vector, bp->bnapi[i]); + irq->requested = 0; + } + if (bp->flags & BNXT_FLAG_USING_MSIX) + pci_disable_msix(bp->pdev); + kfree(bp->irq_tbl); + bp->irq_tbl = NULL; +} + +static int bnxt_request_irq(struct bnxt *bp) +{ + int i, rc = 0; + unsigned long flags = 0; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; +#endif + + if (!(bp->flags & BNXT_FLAG_USING_MSIX)) + flags = IRQF_SHARED; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_irq *irq = &bp->irq_tbl[i]; +#ifdef CONFIG_RFS_ACCEL + if (rmap && (i < bp->rx_nr_rings)) { + rc = irq_cpu_rmap_add(rmap, irq->vector); + if (rc) + netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", + i); + } +#endif + rc = request_irq(irq->vector, irq->handler, flags, irq->name, + bp->bnapi[i]); + if (rc) + break; + + irq->requested = 1; + } + return rc; +} + +static void bnxt_del_napi(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + + napi_hash_del(&bnapi->napi); + netif_napi_del(&bnapi->napi); + } +} + +static void bnxt_init_napi(struct bnxt *bp) +{ + int i; + struct bnxt_napi *bnapi; + + if (bp->flags & BNXT_FLAG_USING_MSIX) { + for (i = 0; i < bp->cp_nr_rings; i++) { + bnapi = bp->bnapi[i]; + netif_napi_add(bp->dev, &bnapi->napi, + bnxt_poll, 64); + napi_hash_add(&bnapi->napi); + 
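/*
 * Editorial aside -- an illustrative sketch, not part of this patch.
 * bnxt_setup_msix() above sizes the rings to whatever MSI-X vectors were
 * actually granted: rx and tx ring counts are trimmed to the vector count,
 * the tx rings are split evenly across the configured traffic classes
 * (falling back to a single class when there are not enough rings), and one
 * completion ring is used per vector, i.e. max(rx, tx).  The standalone
 * sketch below uses illustrative names, not the driver's.
 */
struct example_ring_plan {
	int rx, tx, tx_per_tc, cp, tcs;
};

static struct example_ring_plan example_size_rings(int vecs_granted,
						   int rx_want, int tx_want,
						   int tcs)
{
	struct example_ring_plan p;

	p.rx = rx_want < vecs_granted ? rx_want : vecs_granted;
	p.tx = tx_want < vecs_granted ? tx_want : vecs_granted;
	p.tcs = tcs;

	if (tcs > 1) {
		p.tx_per_tc = p.tx / tcs;
		if (p.tx_per_tc == 0) {		/* too few rings: drop to one TC */
			p.tcs = 1;
			p.tx_per_tc = p.tx;
		} else {			/* use an exact multiple of tcs */
			p.tx = p.tx_per_tc * tcs;
		}
	} else {
		p.tx_per_tc = p.tx;
	}

	p.cp = p.rx > p.tx ? p.rx : p.tx;	/* one completion ring per vector */
	return p;
}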
} + } else { + bnapi = bp->bnapi[0]; + netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); + napi_hash_add(&bnapi->napi); + } +} + +static void bnxt_disable_napi(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + napi_disable(&bp->bnapi[i]->napi); + bnxt_disable_poll(bp->bnapi[i]); + } +} + +static void bnxt_enable_napi(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) { + bnxt_enable_poll(bp->bnapi[i]); + napi_enable(&bp->bnapi[i]->napi); + } +} + +static void bnxt_tx_disable(struct bnxt *bp) +{ + int i; + struct bnxt_napi *bnapi; + struct bnxt_tx_ring_info *txr; + struct netdev_queue *txq; + + if (bp->bnapi) { + for (i = 0; i < bp->tx_nr_rings; i++) { + bnapi = bp->bnapi[i]; + txr = &bnapi->tx_ring; + txq = netdev_get_tx_queue(bp->dev, i); + __netif_tx_lock(txq, smp_processor_id()); + txr->dev_state = BNXT_DEV_STATE_CLOSING; + __netif_tx_unlock(txq); + } + } + /* Stop all TX queues */ + netif_tx_disable(bp->dev); + netif_carrier_off(bp->dev); +} + +static void bnxt_tx_enable(struct bnxt *bp) +{ + int i; + struct bnxt_napi *bnapi; + struct bnxt_tx_ring_info *txr; + struct netdev_queue *txq; + + for (i = 0; i < bp->tx_nr_rings; i++) { + bnapi = bp->bnapi[i]; + txr = &bnapi->tx_ring; + txq = netdev_get_tx_queue(bp->dev, i); + txr->dev_state = 0; + } + netif_tx_wake_all_queues(bp->dev); + if (bp->link_info.link_up) + netif_carrier_on(bp->dev); +} + +static void bnxt_report_link(struct bnxt *bp) +{ + if (bp->link_info.link_up) { + const char *duplex; + const char *flow_ctrl; + u16 speed; + + netif_carrier_on(bp->dev); + if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) + duplex = "full"; + else + duplex = "half"; + if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) + flow_ctrl = "ON - receive & transmit"; + else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) + flow_ctrl = "ON - transmit"; + else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) + flow_ctrl = "ON - receive"; + else + flow_ctrl = "none"; + speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); + netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", + speed, duplex, flow_ctrl); + } else { + netif_carrier_off(bp->dev); + netdev_err(bp->dev, "NIC Link is Down\n"); + } +} + +static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) +{ + int rc = 0; + struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcfg_input req = {0}; + struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + u8 link_up = link_info->link_up; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; + } + + memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); + link_info->phy_link_status = resp->link; + link_info->duplex = resp->duplex; + link_info->pause = resp->pause; + link_info->auto_mode = resp->auto_mode; + link_info->auto_pause_setting = resp->auto_pause; + link_info->force_pause_setting = resp->force_pause; + link_info->duplex_setting = resp->duplex_setting; + if (link_info->phy_link_status == BNXT_LINK_LINK) + link_info->link_speed = le16_to_cpu(resp->link_speed); + else + link_info->link_speed = 0; + link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); + link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed); + link_info->support_speeds = le16_to_cpu(resp->support_speeds); + link_info->auto_link_speeds = 
le16_to_cpu(resp->auto_link_speed_mask); + link_info->preemphasis = le32_to_cpu(resp->preemphasis); + link_info->phy_ver[0] = resp->phy_maj; + link_info->phy_ver[1] = resp->phy_min; + link_info->phy_ver[2] = resp->phy_bld; + link_info->media_type = resp->media_type; + link_info->transceiver = resp->transceiver_type; + link_info->phy_addr = resp->phy_addr; + + /* TODO: need to add more logic to report VF link */ + if (chng_link_state) { + if (link_info->phy_link_status == BNXT_LINK_LINK) + link_info->link_up = 1; + else + link_info->link_up = 0; + if (link_up != link_info->link_up) + bnxt_report_link(bp); + } else { + /* alwasy link down if not require to update link state */ + link_info->link_up = 0; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return 0; +} + +static void +bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) +{ + if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { + if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) + req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; + if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) + req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; + req->enables |= + cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); + } else { + if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) + req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; + if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) + req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; + req->enables |= + cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); + } +} + +static void bnxt_hwrm_set_link_common(struct bnxt *bp, + struct hwrm_port_phy_cfg_input *req) +{ + u8 autoneg = bp->link_info.autoneg; + u16 fw_link_speed = bp->link_info.req_link_speed; + u32 advertising = bp->link_info.advertising; + + if (autoneg & BNXT_AUTONEG_SPEED) { + req->auto_mode |= + PORT_PHY_CFG_REQ_AUTO_MODE_MASK; + + req->enables |= cpu_to_le32( + PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); + req->auto_link_speed_mask = cpu_to_le16(advertising); + + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); + req->flags |= + cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); + } else { + req->force_link_speed = cpu_to_le16(fw_link_speed); + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); + } + + /* currently don't support half duplex */ + req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL; + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX); + /* tell chimp that the setting takes effect immediately */ + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); +} + +int bnxt_hwrm_set_pause(struct bnxt *bp) +{ + struct hwrm_port_phy_cfg_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); + bnxt_hwrm_set_pause_common(bp, &req); + + if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || + bp->link_info.force_link_chng) + bnxt_hwrm_set_link_common(bp, &req); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { + /* since changing of pause setting doesn't trigger any link + * change event, the driver needs to update the current pause + * result upon successfully return of the phy_cfg command + */ + bp->link_info.pause = + bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; + bp->link_info.auto_pause_setting = 0; + if (!bp->link_info.force_link_chng) + bnxt_report_link(bp); + } + bp->link_info.force_link_chng = false; + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +int 
bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause) +{ + struct hwrm_port_phy_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); + if (set_pause) + bnxt_hwrm_set_pause_common(bp, &req); + + bnxt_hwrm_set_link_common(bp, &req); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_update_phy_setting(struct bnxt *bp) +{ + int rc; + bool update_link = false; + bool update_pause = false; + struct bnxt_link_info *link_info = &bp->link_info; + + rc = bnxt_update_link(bp, true); + if (rc) { + netdev_err(bp->dev, "failed to update link (rc: %x)\n", + rc); + return rc; + } + if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && + link_info->auto_pause_setting != link_info->req_flow_ctrl) + update_pause = true; + if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && + link_info->force_pause_setting != link_info->req_flow_ctrl) + update_pause = true; + if (link_info->req_duplex != link_info->duplex_setting) + update_link = true; + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + if (BNXT_AUTO_MODE(link_info->auto_mode)) + update_link = true; + if (link_info->req_link_speed != link_info->force_link_speed) + update_link = true; + } else { + if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) + update_link = true; + if (link_info->advertising != link_info->auto_link_speeds) + update_link = true; + if (link_info->req_link_speed != link_info->auto_link_speed) + update_link = true; + } + + if (update_link) + rc = bnxt_hwrm_set_link_setting(bp, update_pause); + else if (update_pause) + rc = bnxt_hwrm_set_pause(bp); + if (rc) { + netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", + rc); + return rc; + } + + return rc; +} + +static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +{ + int rc = 0; + + netif_carrier_off(bp->dev); + if (irq_re_init) { + rc = bnxt_setup_int_mode(bp); + if (rc) { + netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", + rc); + return rc; + } + } + if ((bp->flags & BNXT_FLAG_RFS) && + !(bp->flags & BNXT_FLAG_USING_MSIX)) { + /* disable RFS if falling back to INTA */ + bp->dev->hw_features &= ~NETIF_F_NTUPLE; + bp->flags &= ~BNXT_FLAG_RFS; + } + + rc = bnxt_alloc_mem(bp, irq_re_init); + if (rc) { + netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); + goto open_err_free_mem; + } + + if (irq_re_init) { + bnxt_init_napi(bp); + rc = bnxt_request_irq(bp); + if (rc) { + netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); + goto open_err; + } + } + + bnxt_enable_napi(bp); + + rc = bnxt_init_nic(bp, irq_re_init); + if (rc) { + netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); + goto open_err; + } + + if (link_re_init) { + rc = bnxt_update_phy_setting(bp); + if (rc) + goto open_err; + } + + if (irq_re_init) { +#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) + vxlan_get_rx_port(bp->dev); +#endif + if (!bnxt_hwrm_tunnel_dst_port_alloc( + bp, htons(0x17c1), + TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE)) + bp->nge_port_cnt = 1; + } + + bp->state = BNXT_STATE_OPEN; + bnxt_enable_int(bp); + /* Enable TX queues */ + bnxt_tx_enable(bp); + mod_timer(&bp->timer, jiffies + bp->current_interval); + + return 0; + +open_err: + bnxt_disable_napi(bp); + bnxt_del_napi(bp); + +open_err_free_mem: + bnxt_free_skbs(bp); + bnxt_free_irq(bp); + bnxt_free_mem(bp, true); + return rc; +} + +/* rtnl_lock held */ +int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +{ + int rc = 0; + + rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); + if (rc) { + 
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); + dev_close(bp->dev); + } + return rc; +} + +static int bnxt_open(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + rc = bnxt_hwrm_func_reset(bp); + if (rc) { + netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n", + rc); + rc = -1; + return rc; + } + return __bnxt_open_nic(bp, true, true); +} + +static void bnxt_disable_int_sync(struct bnxt *bp) +{ + int i; + + atomic_inc(&bp->intr_sem); + if (!netif_running(bp->dev)) + return; + + bnxt_disable_int(bp); + for (i = 0; i < bp->cp_nr_rings; i++) + synchronize_irq(bp->irq_tbl[i].vector); +} + +int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +{ + int rc = 0; + +#ifdef CONFIG_BNXT_SRIOV + if (bp->sriov_cfg) { + rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, + !bp->sriov_cfg, + BNXT_SRIOV_CFG_WAIT_TMO); + if (rc) + netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); + } +#endif + /* Change device state to avoid TX queue wake up's */ + bnxt_tx_disable(bp); + + bp->state = BNXT_STATE_CLOSED; + cancel_work_sync(&bp->sp_task); + + /* Flush rings before disabling interrupts */ + bnxt_shutdown_nic(bp, irq_re_init); + + /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ + + bnxt_disable_napi(bp); + bnxt_disable_int_sync(bp); + del_timer_sync(&bp->timer); + bnxt_free_skbs(bp); + + if (irq_re_init) { + bnxt_free_irq(bp); + bnxt_del_napi(bp); + } + bnxt_free_mem(bp, irq_re_init); + return rc; +} + +static int bnxt_close(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + bnxt_close_nic(bp, true, true); + return 0; +} + +/* rtnl_lock held */ +static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + /* fallthru */ + case SIOCGMIIREG: { + if (!netif_running(dev)) + return -EAGAIN; + + return 0; + } + + case SIOCSMIIREG: + if (!netif_running(dev)) + return -EAGAIN; + + return 0; + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; +} + +static struct rtnl_link_stats64 * +bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + u32 i; + struct bnxt *bp = netdev_priv(dev); + + memset(stats, 0, sizeof(struct rtnl_link_stats64)); + + if (!bp->bnapi) + return stats; + + /* TODO check if we need to synchronize with bnxt_close path */ + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct ctx_hw_stats *hw_stats = cpr->hw_stats; + + stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); + stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); + stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); + + stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); + stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); + stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); + + stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); + stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); + stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); + + stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); + stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); + stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); + + stats->rx_missed_errors += + le64_to_cpu(hw_stats->rx_discard_pkts); + + stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); + + stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts); + + stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); + 
} + + return stats; +} + +static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) +{ + struct net_device *dev = bp->dev; + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct netdev_hw_addr *ha; + u8 *haddr; + int mc_count = 0; + bool update = false; + int off = 0; + + netdev_for_each_mc_addr(ha, dev) { + if (mc_count >= BNXT_MAX_MC_ADDRS) { + *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + return false; + } + haddr = ha->addr; + if (!ether_addr_equal(haddr, vnic->mc_list + off)) { + memcpy(vnic->mc_list + off, haddr, ETH_ALEN); + update = true; + } + off += ETH_ALEN; + mc_count++; + } + if (mc_count) + *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; + + if (mc_count != vnic->mc_list_count) { + vnic->mc_list_count = mc_count; + update = true; + } + return update; +} + +static bool bnxt_uc_list_updated(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct netdev_hw_addr *ha; + int off = 0; + + if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) + return true; + + netdev_for_each_uc_addr(ha, dev) { + if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) + return true; + + off += ETH_ALEN; + } + return false; +} + +static void bnxt_set_rx_mode(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + u32 mask = vnic->rx_mask; + bool mc_update = false; + bool uc_update; + + if (!netif_running(dev)) + return; + + mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | + CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | + CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); + + /* Only allow PF to be in promiscuous mode */ + if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp)) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + + uc_update = bnxt_uc_list_updated(bp); + + if (dev->flags & IFF_ALLMULTI) { + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + } else { + mc_update = bnxt_mc_list_updated(bp, &mask); + } + + if (mask != vnic->rx_mask || uc_update || mc_update) { + vnic->rx_mask = mask; + + set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } +} + +static void bnxt_cfg_rx_mode(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct netdev_hw_addr *ha; + int i, off = 0, rc; + bool uc_update; + + netif_addr_lock_bh(dev); + uc_update = bnxt_uc_list_updated(bp); + netif_addr_unlock_bh(dev); + + if (!uc_update) + goto skip_uc; + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 1; i < vnic->uc_filter_count; i++) { + struct hwrm_cfa_l2_filter_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, + -1); + + req.l2_filter_id = vnic->fw_l2_filter_id[i]; + + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + } + mutex_unlock(&bp->hwrm_cmd_lock); + + vnic->uc_filter_count = 1; + + netif_addr_lock_bh(dev); + if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + } else { + netdev_for_each_uc_addr(ha, dev) { + memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); + off += ETH_ALEN; + vnic->uc_filter_count++; + } + } + netif_addr_unlock_bh(dev); + + for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { + rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); + if (rc) { + netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", + rc); + vnic->uc_filter_count = i; + } + } + +skip_uc: + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + 
if (rc) + netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", + rc); +} + +static netdev_features_t bnxt_fix_features(struct net_device *dev, + netdev_features_t features) +{ + return features; +} + +static int bnxt_set_features(struct net_device *dev, netdev_features_t features) +{ + struct bnxt *bp = netdev_priv(dev); + u32 flags = bp->flags; + u32 changes; + int rc = 0; + bool re_init = false; + bool update_tpa = false; + + flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; + if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0)) + flags |= BNXT_FLAG_GRO; + if (features & NETIF_F_LRO) + flags |= BNXT_FLAG_LRO; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + flags |= BNXT_FLAG_STRIP_VLAN; + + if (features & NETIF_F_NTUPLE) + flags |= BNXT_FLAG_RFS; + + changes = flags ^ bp->flags; + if (changes & BNXT_FLAG_TPA) { + update_tpa = true; + if ((bp->flags & BNXT_FLAG_TPA) == 0 || + (flags & BNXT_FLAG_TPA) == 0) + re_init = true; + } + + if (changes & ~BNXT_FLAG_TPA) + re_init = true; + + if (flags != bp->flags) { + u32 old_flags = bp->flags; + + bp->flags = flags; + + if (!netif_running(dev)) { + if (update_tpa) + bnxt_set_ring_params(bp); + return rc; + } + + if (re_init) { + bnxt_close_nic(bp, false, false); + if (update_tpa) + bnxt_set_ring_params(bp); + + return bnxt_open_nic(bp, false, false); + } + if (update_tpa) { + rc = bnxt_set_tpa(bp, + (flags & BNXT_FLAG_TPA) ? + true : false); + if (rc) + bp->flags = old_flags; + } + } + return rc; +} + +static void bnxt_dbg_dump_states(struct bnxt *bp) +{ + int i; + struct bnxt_napi *bnapi; + struct bnxt_tx_ring_info *txr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; + + for (i = 0; i < bp->cp_nr_rings; i++) { + bnapi = bp->bnapi[i]; + txr = &bnapi->tx_ring; + rxr = &bnapi->rx_ring; + cpr = &bnapi->cp_ring; + if (netif_msg_drv(bp)) { + netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", + i, txr->tx_ring_struct.fw_ring_id, + txr->tx_prod, txr->tx_cons); + netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", + i, rxr->rx_ring_struct.fw_ring_id, + rxr->rx_prod, + rxr->rx_agg_ring_struct.fw_ring_id, + rxr->rx_agg_prod, rxr->rx_sw_agg_prod); + netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", + i, cpr->cp_ring_struct.fw_ring_id, + cpr->cp_raw_cons); + } + } +} + +static void bnxt_reset_task(struct bnxt *bp) +{ + bnxt_dbg_dump_states(bp); + if (netif_running(bp->dev)) + bnxt_tx_disable(bp); /* prevent tx timout again */ +} + +static void bnxt_tx_timeout(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); + set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bnxt_poll_controller(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_irq *irq = &bp->irq_tbl[i]; + + disable_irq(irq->vector); + irq->handler(irq->vector, bp->bnapi[i]); + enable_irq(irq->vector); + } +} +#endif + +static void bnxt_timer(unsigned long data) +{ + struct bnxt *bp = (struct bnxt *)data; + struct net_device *dev = bp->dev; + + if (!netif_running(dev)) + return; + + if (atomic_read(&bp->intr_sem) != 0) + goto bnxt_restart_timer; + +bnxt_restart_timer: + mod_timer(&bp->timer, jiffies + bp->current_interval); +} + +static void bnxt_cfg_ntp_filters(struct bnxt *); + +static void bnxt_sp_task(struct work_struct *work) +{ + struct bnxt *bp = 
container_of(work, struct bnxt, sp_task); + int rc; + + if (bp->state != BNXT_STATE_OPEN) + return; + + if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) + bnxt_cfg_rx_mode(bp); + + if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) + bnxt_cfg_ntp_filters(bp); + if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { + rc = bnxt_update_link(bp, true); + if (rc) + netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", + rc); + } + if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) + bnxt_hwrm_exec_fwd_req(bp); + if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { + bnxt_hwrm_tunnel_dst_port_alloc( + bp, bp->vxlan_port, + TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); + } + if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { + bnxt_hwrm_tunnel_dst_port_free( + bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); + } + if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) + bnxt_reset_task(bp); +} + +static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) +{ + int rc; + struct bnxt *bp = netdev_priv(dev); + + SET_NETDEV_DEV(dev, &pdev->dev); + + /* enable device (incl. PCI PM wakeup), and bus-mastering */ + rc = pci_enable_device(pdev); + if (rc) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + goto init_err; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, + "Cannot find PCI device base address, aborting\n"); + rc = -ENODEV; + goto init_err_disable; + } + + rc = pci_request_regions(pdev, DRV_MODULE_NAME); + if (rc) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto init_err_disable; + } + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { + dev_err(&pdev->dev, "System does not support DMA, aborting\n"); + goto init_err_disable; + } + + pci_set_master(pdev); + + bp->dev = dev; + bp->pdev = pdev; + + bp->bar0 = pci_ioremap_bar(pdev, 0); + if (!bp->bar0) { + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); + rc = -ENOMEM; + goto init_err_release; + } + + bp->bar1 = pci_ioremap_bar(pdev, 2); + if (!bp->bar1) { + dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); + rc = -ENOMEM; + goto init_err_release; + } + + bp->bar2 = pci_ioremap_bar(pdev, 4); + if (!bp->bar2) { + dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); + rc = -ENOMEM; + goto init_err_release; + } + + INIT_WORK(&bp->sp_task, bnxt_sp_task); + + spin_lock_init(&bp->ntp_fltr_lock); + + bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; + bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; + + bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4); + bp->coal_bufs = 20; + bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1); + bp->coal_bufs_irq = 2; + + init_timer(&bp->timer); + bp->timer.data = (unsigned long)bp; + bp->timer.function = bnxt_timer; + bp->current_interval = BNXT_TIMER_INTERVAL; + + bp->state = BNXT_STATE_CLOSED; + + return 0; + +init_err_release: + if (bp->bar2) { + pci_iounmap(pdev, bp->bar2); + bp->bar2 = NULL; + } + + if (bp->bar1) { + pci_iounmap(pdev, bp->bar1); + bp->bar1 = NULL; + } + + if (bp->bar0) { + pci_iounmap(pdev, bp->bar0); + bp->bar0 = NULL; + } + + pci_release_regions(pdev); + +init_err_disable: + pci_disable_device(pdev); + +init_err: + return rc; +} + +/* rtnl_lock held */ +static int bnxt_change_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if 
(!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + return 0; +} + +/* rtnl_lock held */ +static int bnxt_change_mtu(struct net_device *dev, int new_mtu) +{ + struct bnxt *bp = netdev_priv(dev); + + if (new_mtu < 60 || new_mtu > 9000) + return -EINVAL; + + if (netif_running(dev)) + bnxt_close_nic(bp, false, false); + + dev->mtu = new_mtu; + bnxt_set_ring_params(bp); + + if (netif_running(dev)) + return bnxt_open_nic(bp, false, false); + + return 0; +} + +static int bnxt_setup_tc(struct net_device *dev, u8 tc) +{ + struct bnxt *bp = netdev_priv(dev); + + if (tc > bp->max_tc) { + netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", + tc, bp->max_tc); + return -EINVAL; + } + + if (netdev_get_num_tc(dev) == tc) + return 0; + + if (tc) { + int max_rx_rings, max_tx_rings; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + if (bp->tx_nr_rings_per_tc * tc > max_tx_rings) + return -ENOMEM; + } + + /* Needs to close the device and do hw resource re-allocations */ + if (netif_running(bp->dev)) + bnxt_close_nic(bp, true, false); + + if (tc) { + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; + netdev_set_num_tc(dev, tc); + } else { + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + netdev_reset_tc(dev); + } + bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); + bp->num_stat_ctxs = bp->cp_nr_rings; + + if (netif_running(bp->dev)) + return bnxt_open_nic(bp, true, false); + + return 0; +} + +#ifdef CONFIG_RFS_ACCEL +static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, + struct bnxt_ntuple_filter *f2) +{ + struct flow_keys *keys1 = &f1->fkeys; + struct flow_keys *keys2 = &f2->fkeys; + + if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && + keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && + keys1->ports.ports == keys2->ports.ports && + keys1->basic.ip_proto == keys2->basic.ip_proto && + keys1->basic.n_proto == keys2->basic.n_proto && + ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr)) + return true; + + return false; +} + +static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ntuple_filter *fltr, *new_fltr; + struct flow_keys *fkeys; + struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); + int rc = 0, idx; + struct hlist_head *head; + + if (skb->encapsulation) + return -EPROTONOSUPPORT; + + new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); + if (!new_fltr) + return -ENOMEM; + + fkeys = &new_fltr->fkeys; + if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { + rc = -EPROTONOSUPPORT; + goto err_free; + } + + if ((fkeys->basic.n_proto != htons(ETH_P_IP)) || + ((fkeys->basic.ip_proto != IPPROTO_TCP) && + (fkeys->basic.ip_proto != IPPROTO_UDP))) { + rc = -EPROTONOSUPPORT; + goto err_free; + } + + memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); + + idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; + head = &bp->ntp_fltr_hash_tbl[idx]; + rcu_read_lock(); + hlist_for_each_entry_rcu(fltr, head, hash) { + if (bnxt_fltr_match(fltr, new_fltr)) { + rcu_read_unlock(); + rc = 0; + goto err_free; + } + } + rcu_read_unlock(); + + spin_lock_bh(&bp->ntp_fltr_lock); + new_fltr->sw_id = bitmap_find_free_region(bp->ntp_fltr_bmap, + BNXT_NTP_FLTR_MAX_FLTR, 0); + if (new_fltr->sw_id < 0) { + spin_unlock_bh(&bp->ntp_fltr_lock); + rc = -ENOMEM; + goto err_free; + } + + new_fltr->flow_id = flow_id; + new_fltr->rxq = rxq_index; + hlist_add_head_rcu(&new_fltr->hash, 
head); + bp->ntp_fltr_count++; + spin_unlock_bh(&bp->ntp_fltr_lock); + + set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + + return new_fltr->sw_id; + +err_free: + kfree(new_fltr); + return rc; +} + +static void bnxt_cfg_ntp_filters(struct bnxt *bp) +{ + int i; + + for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + struct hlist_node *tmp; + struct bnxt_ntuple_filter *fltr; + int rc; + + head = &bp->ntp_fltr_hash_tbl[i]; + hlist_for_each_entry_safe(fltr, tmp, head, hash) { + bool del = false; + + if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { + if (rps_may_expire_flow(bp->dev, fltr->rxq, + fltr->flow_id, + fltr->sw_id)) { + bnxt_hwrm_cfa_ntuple_filter_free(bp, + fltr); + del = true; + } + } else { + rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, + fltr); + if (rc) + del = true; + else + set_bit(BNXT_FLTR_VALID, &fltr->state); + } + + if (del) { + spin_lock_bh(&bp->ntp_fltr_lock); + hlist_del_rcu(&fltr->hash); + bp->ntp_fltr_count--; + spin_unlock_bh(&bp->ntp_fltr_lock); + synchronize_rcu(); + clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); + kfree(fltr); + } + } + } +} + +#else + +static void bnxt_cfg_ntp_filters(struct bnxt *bp) +{ +} + +#endif /* CONFIG_RFS_ACCEL */ + +static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct bnxt *bp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + if (sa_family != AF_INET6 && sa_family != AF_INET) + return; + + if (bp->vxlan_port_cnt && bp->vxlan_port != port) + return; + + bp->vxlan_port_cnt++; + if (bp->vxlan_port_cnt == 1) { + bp->vxlan_port = port; + set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } +} + +static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct bnxt *bp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + if (sa_family != AF_INET6 && sa_family != AF_INET) + return; + + if (bp->vxlan_port_cnt && bp->vxlan_port == port) { + bp->vxlan_port_cnt--; + + if (bp->vxlan_port_cnt == 0) { + set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } + } +} + +static const struct net_device_ops bnxt_netdev_ops = { + .ndo_open = bnxt_open, + .ndo_start_xmit = bnxt_start_xmit, + .ndo_stop = bnxt_close, + .ndo_get_stats64 = bnxt_get_stats64, + .ndo_set_rx_mode = bnxt_set_rx_mode, + .ndo_do_ioctl = bnxt_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = bnxt_change_mac_addr, + .ndo_change_mtu = bnxt_change_mtu, + .ndo_fix_features = bnxt_fix_features, + .ndo_set_features = bnxt_set_features, + .ndo_tx_timeout = bnxt_tx_timeout, +#ifdef CONFIG_BNXT_SRIOV + .ndo_get_vf_config = bnxt_get_vf_config, + .ndo_set_vf_mac = bnxt_set_vf_mac, + .ndo_set_vf_vlan = bnxt_set_vf_vlan, + .ndo_set_vf_rate = bnxt_set_vf_bw, + .ndo_set_vf_link_state = bnxt_set_vf_link_state, + .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bnxt_poll_controller, +#endif + .ndo_setup_tc = bnxt_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = bnxt_rx_flow_steer, +#endif + .ndo_add_vxlan_port = bnxt_add_vxlan_port, + .ndo_del_vxlan_port = bnxt_del_vxlan_port, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = bnxt_busy_poll, +#endif +}; + +static void bnxt_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(dev); + + if (BNXT_PF(bp)) + bnxt_sriov_disable(bp); + + unregister_netdev(dev); + 
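+	/* The netdev is already unregistered at this point; make sure the
+	 * deferred sp_task is no longer running before the HWRM resources
+	 * and register mappings it relies on are freed below.
+	 */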
cancel_work_sync(&bp->sp_task); + bp->sp_event = 0; + + bnxt_free_hwrm_resources(bp); + pci_iounmap(pdev, bp->bar2); + pci_iounmap(pdev, bp->bar1); + pci_iounmap(pdev, bp->bar0); + free_netdev(dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static int bnxt_probe_phy(struct bnxt *bp) +{ + int rc = 0; + struct bnxt_link_info *link_info = &bp->link_info; + char phy_ver[PHY_VER_STR_LEN]; + + rc = bnxt_update_link(bp, false); + if (rc) { + netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", + rc); + return rc; + } + + /*initialize the ethool setting copy with NVM settings */ + if (BNXT_AUTO_MODE(link_info->auto_mode)) + link_info->autoneg |= BNXT_AUTONEG_SPEED; + + if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { + if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH) + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + link_info->req_flow_ctrl = link_info->auto_pause_setting; + } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { + link_info->req_flow_ctrl = link_info->force_pause_setting; + } + link_info->req_duplex = link_info->duplex_setting; + if (link_info->autoneg & BNXT_AUTONEG_SPEED) + link_info->req_link_speed = link_info->auto_link_speed; + else + link_info->req_link_speed = link_info->force_link_speed; + link_info->advertising = link_info->auto_link_speeds; + snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d", + link_info->phy_ver[0], + link_info->phy_ver[1], + link_info->phy_ver[2]); + strcat(bp->fw_ver_str, phy_ver); + return rc; +} + +static int bnxt_get_max_irq(struct pci_dev *pdev) +{ + u16 ctrl; + + if (!pdev->msix_cap) + return 1; + + pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); + return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; +} + +void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx) +{ + int max_rings = 0; + + if (BNXT_PF(bp)) { + *max_tx = bp->pf.max_pf_tx_rings; + *max_rx = bp->pf.max_pf_rx_rings; + max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); + max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs); + } else { +#ifdef CONFIG_BNXT_SRIOV + *max_tx = bp->vf.max_tx_rings; + *max_rx = bp->vf.max_rx_rings; + max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); + max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs); +#endif + } + if (bp->flags & BNXT_FLAG_AGG_RINGS) + *max_rx >>= 1; + + *max_rx = min_t(int, *max_rx, max_rings); + *max_tx = min_t(int, *max_tx, max_rings); +} + +static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int version_printed; + struct net_device *dev; + struct bnxt *bp; + int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings; + + if (version_printed++ == 0) + pr_info("%s", version); + + max_irqs = bnxt_get_max_irq(pdev); + dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); + if (!dev) + return -ENOMEM; + + bp = netdev_priv(dev); + + if (bnxt_vf_pciid(ent->driver_data)) + bp->flags |= BNXT_FLAG_VF; + + if (pdev->msix_cap) { + bp->flags |= BNXT_FLAG_MSIX_CAP; + if (BNXT_PF(bp)) + bp->flags |= BNXT_FLAG_RFS; + } + + rc = bnxt_init_board(pdev, dev); + if (rc < 0) + goto init_err_free; + + dev->netdev_ops = &bnxt_netdev_ops; + dev->watchdog_timeo = BNXT_TX_TIMEOUT; + dev->ethtool_ops = &bnxt_ethtool_ops; + + pci_set_drvdata(pdev, dev); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | + NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO; + + if 
(bp->flags & BNXT_FLAG_RFS) + dev->hw_features |= NETIF_F_NTUPLE; + + dev->hw_enc_features = + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | + NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; + dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + dev->features |= dev->hw_features | NETIF_F_HIGHDMA; + dev->priv_flags |= IFF_UNICAST_FLT; + +#ifdef CONFIG_BNXT_SRIOV + init_waitqueue_head(&bp->sriov_cfg_wait); +#endif + rc = bnxt_alloc_hwrm_resources(bp); + if (rc) + goto init_err; + + mutex_init(&bp->hwrm_cmd_lock); + bnxt_hwrm_ver_get(bp); + + rc = bnxt_hwrm_func_drv_rgtr(bp); + if (rc) + goto init_err; + + /* Get the MAX capabilities for this function */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", + rc); + rc = -1; + goto init_err; + } + + rc = bnxt_hwrm_queue_qportcfg(bp); + if (rc) { + netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", + rc); + rc = -1; + goto init_err; + } + + bnxt_set_tpa_flags(bp); + bnxt_set_ring_params(bp); + dflt_rings = netif_get_num_default_rss_queues(); + if (BNXT_PF(bp)) { + memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); + bp->pf.max_irqs = max_irqs; + } else { +#if defined(CONFIG_BNXT_SRIOV) + memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); + bp->vf.max_irqs = max_irqs; +#endif + } + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); + bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); + bp->num_stat_ctxs = bp->cp_nr_rings; + + if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) + bp->flags |= BNXT_FLAG_STRIP_VLAN; + + rc = bnxt_probe_phy(bp); + if (rc) + goto init_err; + + rc = register_netdev(dev); + if (rc) + goto init_err; + + netdev_info(dev, "%s found at mem %lx, node addr %pM\n", + board_info[ent->driver_data].name, + (long)pci_resource_start(pdev, 0), dev->dev_addr); + + return 0; + +init_err: + pci_iounmap(pdev, bp->bar0); + pci_release_regions(pdev); + pci_disable_device(pdev); + +init_err_free: + free_netdev(dev); + return rc; +} + +static struct pci_driver bnxt_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = bnxt_pci_tbl, + .probe = bnxt_init_one, + .remove = bnxt_remove_one, +#if defined(CONFIG_BNXT_SRIOV) + .sriov_configure = bnxt_sriov_configure, +#endif +}; + +module_pci_driver(bnxt_pci_driver); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h new file mode 100644 index 000000000000..4f2267ca482d --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -0,0 +1,1086 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_H +#define BNXT_H + +#define DRV_MODULE_NAME "bnxt_en" +#define DRV_MODULE_VERSION "0.1.24" + +#define DRV_VER_MAJ 0 +#define DRV_VER_MIN 1 +#define DRV_VER_UPD 24 + +struct tx_bd { + __le32 tx_bd_len_flags_type; + #define TX_BD_TYPE (0x3f << 0) + #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0) + #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0) + #define TX_BD_FLAGS_PACKET_END (1 << 6) + #define TX_BD_FLAGS_NO_CMPL (1 << 7) + #define TX_BD_FLAGS_BD_CNT (0x1f << 8) + #define TX_BD_FLAGS_BD_CNT_SHIFT 8 + #define TX_BD_FLAGS_LHINT (3 << 13) + #define TX_BD_FLAGS_LHINT_SHIFT 13 + #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13) + #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13) + #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13) + #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13) + #define TX_BD_FLAGS_COAL_NOW (1 << 15) + #define TX_BD_LEN (0xffff << 16) + #define TX_BD_LEN_SHIFT 16 + + u32 tx_bd_opaque; + __le64 tx_bd_haddr; +} __packed; + +struct tx_bd_ext { + __le32 tx_bd_hsize_lflags; + #define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0) + #define TX_BD_FLAGS_IP_CKSUM (1 << 1) + #define TX_BD_FLAGS_NO_CRC (1 << 2) + #define TX_BD_FLAGS_STAMP (1 << 3) + #define TX_BD_FLAGS_T_IP_CHKSUM (1 << 4) + #define TX_BD_FLAGS_LSO (1 << 5) + #define TX_BD_FLAGS_IPID_FMT (1 << 6) + #define TX_BD_FLAGS_T_IPID (1 << 7) + #define TX_BD_HSIZE (0xff << 16) + #define TX_BD_HSIZE_SHIFT 16 + + __le32 tx_bd_mss; + __le32 tx_bd_cfa_action; + #define TX_BD_CFA_ACTION (0xffff << 16) + #define TX_BD_CFA_ACTION_SHIFT 16 + + __le32 tx_bd_cfa_meta; + #define TX_BD_CFA_META_MASK 0xfffffff + #define TX_BD_CFA_META_VID_MASK 0xfff + #define TX_BD_CFA_META_PRI_MASK (0xf << 12) + #define TX_BD_CFA_META_PRI_SHIFT 12 + #define TX_BD_CFA_META_TPID_MASK (3 << 16) + #define TX_BD_CFA_META_TPID_SHIFT 16 + #define TX_BD_CFA_META_KEY (0xf << 28) + #define TX_BD_CFA_META_KEY_SHIFT 28 + #define TX_BD_CFA_META_KEY_VLAN (1 << 28) +}; + +struct rx_bd { + __le32 rx_bd_len_flags_type; + #define RX_BD_TYPE (0x3f << 0) + #define RX_BD_TYPE_RX_PACKET_BD 0x4 + #define RX_BD_TYPE_RX_BUFFER_BD 0x5 + #define RX_BD_TYPE_RX_AGG_BD 0x6 + #define RX_BD_TYPE_16B_BD_SIZE (0 << 4) + #define RX_BD_TYPE_32B_BD_SIZE (1 << 4) + #define RX_BD_TYPE_48B_BD_SIZE (2 << 4) + #define RX_BD_TYPE_64B_BD_SIZE (3 << 4) + #define RX_BD_FLAGS_SOP (1 << 6) + #define RX_BD_FLAGS_EOP (1 << 7) + #define RX_BD_FLAGS_BUFFERS (3 << 8) + #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8) + #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8) + #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8) + #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8) + #define RX_BD_LEN (0xffff << 16) + #define RX_BD_LEN_SHIFT 16 + + u32 rx_bd_opaque; + __le64 rx_bd_haddr; +}; + +struct tx_cmp { + __le32 tx_cmp_flags_type; + #define CMP_TYPE (0x3f << 0) + #define CMP_TYPE_TX_L2_CMP 0 + #define CMP_TYPE_RX_L2_CMP 17 + #define CMP_TYPE_RX_AGG_CMP 18 + #define CMP_TYPE_RX_L2_TPA_START_CMP 19 + #define CMP_TYPE_RX_L2_TPA_END_CMP 21 + #define CMP_TYPE_STATUS_CMP 32 + #define CMP_TYPE_REMOTE_DRIVER_REQ 34 + #define CMP_TYPE_REMOTE_DRIVER_RESP 36 + #define CMP_TYPE_ERROR_STATUS 48 + #define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0) + #define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0) + #define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0) + #define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0) + #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + + #define TX_CMP_FLAGS_ERROR (1 << 6) + #define TX_CMP_FLAGS_PUSH (1 << 7) + + u32 tx_cmp_opaque; + __le32 tx_cmp_errors_v; + #define TX_CMP_V (1 << 0) + #define TX_CMP_ERRORS_BUFFER_ERROR (7 
<< 1) + #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0 + #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2 + #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4 + #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5 + #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4) + #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5) + #define TX_CMP_ERRORS_DMA_ERROR (1 << 6) + #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7) + + __le32 tx_cmp_unsed_3; +}; + +struct rx_cmp { + __le32 rx_cmp_len_flags_type; + #define RX_CMP_CMP_TYPE (0x3f << 0) + #define RX_CMP_FLAGS_ERROR (1 << 6) + #define RX_CMP_FLAGS_PLACEMENT (7 << 7) + #define RX_CMP_FLAGS_RSS_VALID (1 << 10) + #define RX_CMP_FLAGS_UNUSED (1 << 11) + #define RX_CMP_FLAGS_ITYPES_SHIFT 12 + #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12) + #define RX_CMP_FLAGS_ITYPE_IP (1 << 12) + #define RX_CMP_FLAGS_ITYPE_TCP (2 << 12) + #define RX_CMP_FLAGS_ITYPE_UDP (3 << 12) + #define RX_CMP_FLAGS_ITYPE_FCOE (4 << 12) + #define RX_CMP_FLAGS_ITYPE_ROCE (5 << 12) + #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS (8 << 12) + #define RX_CMP_FLAGS_ITYPE_PTP_W_TS (9 << 12) + #define RX_CMP_LEN (0xffff << 16) + #define RX_CMP_LEN_SHIFT 16 + + u32 rx_cmp_opaque; + __le32 rx_cmp_misc_v1; + #define RX_CMP_V1 (1 << 0) + #define RX_CMP_AGG_BUFS (0x1f << 1) + #define RX_CMP_AGG_BUFS_SHIFT 1 + #define RX_CMP_RSS_HASH_TYPE (0x7f << 9) + #define RX_CMP_RSS_HASH_TYPE_SHIFT 9 + #define RX_CMP_PAYLOAD_OFFSET (0xff << 16) + #define RX_CMP_PAYLOAD_OFFSET_SHIFT 16 + + __le32 rx_cmp_rss_hash; +}; + +#define RX_CMP_HASH_VALID(rxcmp) \ + ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID)) + +#define RX_CMP_HASH_TYPE(rxcmp) \ + ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\ + RX_CMP_RSS_HASH_TYPE_SHIFT) + +struct rx_cmp_ext { + __le32 rx_cmp_flags2; + #define RX_CMP_FLAGS2_IP_CS_CALC 0x1 + #define RX_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) + #define RX_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) + #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) + #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4) + __le32 rx_cmp_meta_data; + #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff + #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000 + #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16 + __le32 rx_cmp_cfa_code_errors_v2; + #define RX_CMP_V (1 << 0) + #define RX_CMPL_ERRORS_MASK (0x7fff << 1) + #define RX_CMPL_ERRORS_SFT 1 + #define RX_CMPL_ERRORS_BUFFER_ERROR_MASK (0x7 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + #define RX_CMPL_ERRORS_IP_CS_ERROR (0x1 << 4) + #define RX_CMPL_ERRORS_L4_CS_ERROR (0x1 << 5) + #define RX_CMPL_ERRORS_T_IP_CS_ERROR (0x1 << 6) + #define RX_CMPL_ERRORS_T_L4_CS_ERROR (0x1 << 7) + #define RX_CMPL_ERRORS_CRC_ERROR (0x1 << 8) + #define RX_CMPL_ERRORS_T_PKT_ERROR_MASK (0x7 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6 << 9) + #define RX_CMPL_ERRORS_PKT_ERROR_MASK (0xf << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1 << 12) + #define 
RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12) + + #define RX_CMPL_CFA_CODE_MASK (0xffff << 16) + #define RX_CMPL_CFA_CODE_SFT 16 + + __le32 rx_cmp_unused3; +}; + +#define RX_CMP_L2_ERRORS \ + cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR) + +#define RX_CMP_L4_CS_BITS \ + (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC)) + +#define RX_CMP_L4_CS_ERR_BITS \ + (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR)) + +#define RX_CMP_L4_CS_OK(rxcmp1) \ + (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) && \ + !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)) + +#define RX_CMP_ENCAP(rxcmp1) \ + ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \ + RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3) + +struct rx_agg_cmp { + __le32 rx_agg_cmp_len_flags_type; + #define RX_AGG_CMP_TYPE (0x3f << 0) + #define RX_AGG_CMP_LEN (0xffff << 16) + #define RX_AGG_CMP_LEN_SHIFT 16 + u32 rx_agg_cmp_opaque; + __le32 rx_agg_cmp_v; + #define RX_AGG_CMP_V (1 << 0) + __le32 rx_agg_cmp_unused; +}; + +struct rx_tpa_start_cmp { + __le32 rx_tpa_start_cmp_len_flags_type; + #define RX_TPA_START_CMP_TYPE (0x3f << 0) + #define RX_TPA_START_CMP_FLAGS (0x3ff << 6) + #define RX_TPA_START_CMP_FLAGS_SHIFT 6 + #define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7 + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) + #define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10) + #define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12) + #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12 + #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12) + #define RX_TPA_START_CMP_LEN (0xffff << 16) + #define RX_TPA_START_CMP_LEN_SHIFT 16 + + u32 rx_tpa_start_cmp_opaque; + __le32 rx_tpa_start_cmp_misc_v1; + #define RX_TPA_START_CMP_V1 (0x1 << 0) + #define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9) + #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9 + #define RX_TPA_START_CMP_AGG_ID (0x7f << 25) + #define RX_TPA_START_CMP_AGG_ID_SHIFT 25 + + __le32 rx_tpa_start_cmp_rss_hash; +}; + +#define TPA_START_HASH_VALID(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID)) + +#define TPA_START_HASH_TYPE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_RSS_HASH_TYPE) >> \ + RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) + +#define TPA_START_AGG_ID(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT) + +struct rx_tpa_start_cmp_ext { + __le32 rx_tpa_start_cmp_flags2; + #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0) + #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) + #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) + #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) + + __le32 rx_tpa_start_cmp_metadata; + __le32 rx_tpa_start_cmp_cfa_code_v2; + #define RX_TPA_START_CMP_V2 (0x1 << 0) + #define 
RX_TPA_START_CMP_CFA_CODE (0xffff << 16) + #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 + __le32 rx_tpa_start_cmp_unused5; +}; + +struct rx_tpa_end_cmp { + __le32 rx_tpa_end_cmp_len_flags_type; + #define RX_TPA_END_CMP_TYPE (0x3f << 0) + #define RX_TPA_END_CMP_FLAGS (0x3ff << 6) + #define RX_TPA_END_CMP_FLAGS_SHIFT 6 + #define RX_TPA_END_CMP_FLAGS_PLACEMENT (0x7 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7 + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) + #define RX_TPA_END_CMP_FLAGS_RSS_VALID (0x1 << 10) + #define RX_TPA_END_CMP_FLAGS_ITYPES (0xf << 12) + #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12 + #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12) + #define RX_TPA_END_CMP_LEN (0xffff << 16) + #define RX_TPA_END_CMP_LEN_SHIFT 16 + + u32 rx_tpa_end_cmp_opaque; + __le32 rx_tpa_end_cmp_misc_v1; + #define RX_TPA_END_CMP_V1 (0x1 << 0) + #define RX_TPA_END_CMP_AGG_BUFS (0x3f << 1) + #define RX_TPA_END_CMP_AGG_BUFS_SHIFT 1 + #define RX_TPA_END_CMP_TPA_SEGS (0xff << 8) + #define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8 + #define RX_TPA_END_CMP_PAYLOAD_OFFSET (0xff << 16) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16 + #define RX_TPA_END_CMP_AGG_ID (0x7f << 25) + #define RX_TPA_END_CMP_AGG_ID_SHIFT 25 + + __le32 rx_tpa_end_cmp_tsdelta; + #define RX_TPA_END_GRO_TS (0x1 << 31) +}; + +#define TPA_END_AGG_ID(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT) + +#define TPA_END_TPA_SEGS(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT) + +#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \ + cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \ + RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS) + +#define TPA_END_GRO(rx_tpa_end) \ + ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \ + RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO) + +#define TPA_END_GRO_TS(rx_tpa_end) \ + ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS)) + +struct rx_tpa_end_cmp_ext { + __le32 rx_tpa_end_cmp_dup_acks; + #define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0) + + __le32 rx_tpa_end_cmp_seg_len; + #define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0) + + __le32 rx_tpa_end_cmp_errors_v2; + #define RX_TPA_END_CMP_V2 (0x1 << 0) + #define RX_TPA_END_CMP_ERRORS (0x7fff << 1) + #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 + + u32 rx_tpa_end_cmp_start_opaque; +}; + +#define DB_IDX_MASK 0xffffff +#define DB_IDX_VALID (0x1 << 26) +#define DB_IRQ_DIS (0x1 << 27) +#define DB_KEY_TX (0x0 << 28) +#define DB_KEY_RX (0x1 << 28) +#define DB_KEY_CP (0x2 << 28) +#define DB_KEY_ST (0x3 << 28) +#define DB_KEY_TX_PUSH (0x4 << 28) +#define DB_LONG_TX_PUSH (0x2 << 24) + +#define INVALID_HW_RING_ID ((u16)-1) + +#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01 +#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02 +#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04 +#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08 + +/* The hardware supports certain page sizes. Use the supported page sizes + * to allocate the rings. 
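+ * (4 KB and 8 KB kernel pages are used directly; smaller pages are rounded
+ * up to 4 KB ring pages, 16 KB and 32 KB pages fall back to 8 KB ring pages,
+ * and 64 KB or larger pages use 64 KB ring pages.)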
+ */ +#if (PAGE_SHIFT < 12) +#define BNXT_PAGE_SHIFT 12 +#elif (PAGE_SHIFT <= 13) +#define BNXT_PAGE_SHIFT PAGE_SHIFT +#elif (PAGE_SHIFT < 16) +#define BNXT_PAGE_SHIFT 13 +#else +#define BNXT_PAGE_SHIFT 16 +#endif + +#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT) + +#define BNXT_MIN_PKT_SIZE 45 + +#define BNXT_NUM_TESTS(bp) 0 + +#define BNXT_DEFAULT_RX_RING_SIZE 1023 +#define BNXT_DEFAULT_TX_RING_SIZE 512 + +#define MAX_TPA 64 + +#define MAX_RX_PAGES 8 +#define MAX_RX_AGG_PAGES 32 +#define MAX_TX_PAGES 8 +#define MAX_CP_PAGES 64 + +#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd)) +#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd)) +#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp)) + +#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT) +#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT) + +#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT) + +#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT) +#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT) + +#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT) + +#define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1) +#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1) +#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1) + +#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) +#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1)) + +#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) +#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1)) + +#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) +#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1)) + +#define TX_CMP_VALID(txcmp, raw_cons) \ + (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) == \ + !((raw_cons) & bp->cp_bit)) + +#define RX_CMP_VALID(rxcmp1, raw_cons) \ + (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\ + !((raw_cons) & bp->cp_bit)) + +#define RX_AGG_CMP_VALID(agg, raw_cons) \ + (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \ + !((raw_cons) & bp->cp_bit)) + +#define TX_CMP_TYPE(txcmp) \ + (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE) + +#define RX_CMP_TYPE(rxcmp) \ + (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE) + +#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask) + +#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask) + +#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask) + +#define ADV_RAW_CMP(idx, n) ((idx) + (n)) +#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1) +#define RING_CMP(idx) ((idx) & bp->cp_ring_mask) +#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) + +#define HWRM_CMD_TIMEOUT 500 +#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) +#define HWRM_RESP_ERR_CODE_MASK 0xffff +#define HWRM_RESP_LEN_MASK 0xffff0000 +#define HWRM_RESP_LEN_SFT 16 +#define HWRM_RESP_VALID_MASK 0xff000000 +#define BNXT_HWRM_REQ_MAX_SIZE 128 +#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ + BNXT_HWRM_REQ_MAX_SIZE) + +struct bnxt_sw_tx_bd { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); + u8 is_gso; + u8 is_push; + unsigned short nr_frags; +}; + +struct bnxt_sw_rx_bd { + u8 *data; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +struct bnxt_sw_rx_agg_bd { + struct page *page; + dma_addr_t mapping; +}; + +struct bnxt_ring_struct { + int nr_pages; + int page_size; + void **pg_arr; + dma_addr_t *dma_arr; + + __le64 *pg_tbl; + dma_addr_t pg_tbl_map; + + int vmem_size; + void **vmem; + + u16 fw_ring_id; /* Ring id filled by Chimp FW 
*/ + u8 queue_id; +}; + +struct tx_push_bd { + __le32 doorbell; + struct tx_bd txbd1; + struct tx_bd_ext txbd2; +}; + +struct bnxt_tx_ring_info { + u16 tx_prod; + u16 tx_cons; + void __iomem *tx_doorbell; + + struct tx_bd *tx_desc_ring[MAX_TX_PAGES]; + struct bnxt_sw_tx_bd *tx_buf_ring; + + dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; + + struct tx_push_bd *tx_push; + dma_addr_t tx_push_mapping; + +#define BNXT_DEV_STATE_CLOSING 0x1 + u32 dev_state; + + struct bnxt_ring_struct tx_ring_struct; +}; + +struct bnxt_tpa_info { + u8 *data; + dma_addr_t mapping; + u16 len; + unsigned short gso_type; + u32 flags2; + u32 metadata; + enum pkt_hash_types hash_type; + u32 rss_hash; +}; + +struct bnxt_rx_ring_info { + u16 rx_prod; + u16 rx_agg_prod; + u16 rx_sw_agg_prod; + void __iomem *rx_doorbell; + void __iomem *rx_agg_doorbell; + + struct rx_bd *rx_desc_ring[MAX_RX_PAGES]; + struct bnxt_sw_rx_bd *rx_buf_ring; + + struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES]; + struct bnxt_sw_rx_agg_bd *rx_agg_ring; + + unsigned long *rx_agg_bmap; + u16 rx_agg_bmap_size; + + dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; + dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; + + struct bnxt_tpa_info *rx_tpa; + + struct bnxt_ring_struct rx_ring_struct; + struct bnxt_ring_struct rx_agg_ring_struct; +}; + +struct bnxt_cp_ring_info { + u32 cp_raw_cons; + void __iomem *cp_doorbell; + + struct tx_cmp *cp_desc_ring[MAX_CP_PAGES]; + + dma_addr_t cp_desc_mapping[MAX_CP_PAGES]; + + struct ctx_hw_stats *hw_stats; + dma_addr_t hw_stats_map; + u32 hw_stats_ctx_id; + u64 rx_l4_csum_errors; + + struct bnxt_ring_struct cp_ring_struct; +}; + +struct bnxt_napi { + struct napi_struct napi; + struct bnxt *bp; + + int index; + struct bnxt_cp_ring_info cp_ring; + struct bnxt_rx_ring_info rx_ring; + struct bnxt_tx_ring_info tx_ring; + +#ifdef CONFIG_NET_RX_BUSY_POLL + atomic_t poll_state; +#endif +}; + +#ifdef CONFIG_NET_RX_BUSY_POLL +enum bnxt_poll_state_t { + BNXT_STATE_IDLE = 0, + BNXT_STATE_NAPI, + BNXT_STATE_POLL, + BNXT_STATE_DISABLE, +}; +#endif + +struct bnxt_irq { + irq_handler_t handler; + unsigned int vector; + u8 requested; + char name[IFNAMSIZ + 2]; +}; + +#define HWRM_RING_ALLOC_TX 0x1 +#define HWRM_RING_ALLOC_RX 0x2 +#define HWRM_RING_ALLOC_AGG 0x4 +#define HWRM_RING_ALLOC_CMPL 0x8 + +#define INVALID_STATS_CTX_ID -1 + +struct hwrm_cmd_req_hdr { +#define HWRM_CMPL_RING_MASK 0xffff0000 +#define HWRM_CMPL_RING_SFT 16 + __le32 cmpl_ring_req_type; +#define HWRM_SEQ_ID_MASK 0xffff +#define HWRM_SEQ_ID_INVALID -1 +#define HWRM_RESP_LEN_OFFSET 4 +#define HWRM_TARGET_FID_MASK 0xffff0000 +#define HWRM_TARGET_FID_SFT 16 + __le32 target_id_seq_id; + __le64 resp_addr; +}; + +struct bnxt_ring_grp_info { + u16 fw_stats_ctx; + u16 fw_grp_id; + u16 rx_fw_ring_id; + u16 agg_fw_ring_id; + u16 cp_fw_ring_id; +}; + +struct bnxt_vnic_info { + u16 fw_vnic_id; /* returned by Chimp during alloc */ + u16 fw_rss_cos_lb_ctx; + u16 fw_l2_ctx_id; +#define BNXT_MAX_UC_ADDRS 4 + __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS]; + /* index 0 always dev_addr */ + u16 uc_filter_count; + u8 *uc_list; + + u16 *fw_grp_ids; + u16 hash_type; + dma_addr_t rss_table_dma_addr; + __le16 *rss_table; + dma_addr_t rss_hash_key_dma_addr; + u64 *rss_hash_key; + u32 rx_mask; + + u8 *mc_list; + int mc_list_size; + int mc_list_count; + dma_addr_t mc_list_mapping; +#define BNXT_MAX_MC_ADDRS 16 + + u32 flags; +#define BNXT_VNIC_RSS_FLAG 1 +#define BNXT_VNIC_RFS_FLAG 2 +#define BNXT_VNIC_MCAST_FLAG 4 +#define BNXT_VNIC_UCAST_FLAG 8 +}; + +#if defined(CONFIG_BNXT_SRIOV) +struct bnxt_vf_info { 
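+	/* Resource limits and settings for a virtual function; used for
+	 * bp->vf on a VF and referenced from bnxt_pf_info.vf on the PF.
+	 */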
+ u16 fw_fid; + u8 mac_addr[ETH_ALEN]; + u16 max_rsscos_ctxs; + u16 max_cp_rings; + u16 max_tx_rings; + u16 max_rx_rings; + u16 max_l2_ctxs; + u16 max_irqs; + u16 max_vnics; + u16 max_stat_ctxs; + u16 vlan; + u32 flags; +#define BNXT_VF_QOS 0x1 +#define BNXT_VF_SPOOFCHK 0x2 +#define BNXT_VF_LINK_FORCED 0x4 +#define BNXT_VF_LINK_UP 0x8 + u32 func_flags; /* func cfg flags */ + u32 min_tx_rate; + u32 max_tx_rate; + void *hwrm_cmd_req_addr; + dma_addr_t hwrm_cmd_req_dma_addr; +}; +#endif + +struct bnxt_pf_info { +#define BNXT_FIRST_PF_FID 1 +#define BNXT_FIRST_VF_FID 128 + u32 fw_fid; + u8 port_id; + u8 mac_addr[ETH_ALEN]; + u16 max_rsscos_ctxs; + u16 max_cp_rings; + u16 max_tx_rings; /* HW assigned max tx rings for this PF */ + u16 max_pf_tx_rings; /* runtime max tx rings owned by PF */ + u16 max_rx_rings; /* HW assigned max rx rings for this PF */ + u16 max_pf_rx_rings; /* runtime max rx rings owned by PF */ + u16 max_irqs; + u16 max_l2_ctxs; + u16 max_vnics; + u16 max_stat_ctxs; + u32 first_vf_id; + u16 active_vfs; + u16 max_vfs; + u32 max_encap_records; + u32 max_decap_records; + u32 max_tx_em_flows; + u32 max_tx_wm_flows; + u32 max_rx_em_flows; + u32 max_rx_wm_flows; + unsigned long *vf_event_bmap; + u16 hwrm_cmd_req_pages; + void *hwrm_cmd_req_addr[4]; + dma_addr_t hwrm_cmd_req_dma_addr[4]; + struct bnxt_vf_info *vf; +}; + +struct bnxt_ntuple_filter { + struct hlist_node hash; + u8 src_mac_addr[ETH_ALEN]; + struct flow_keys fkeys; + __le64 filter_id; + u16 sw_id; + u16 rxq; + u32 flow_id; + unsigned long state; +#define BNXT_FLTR_VALID 0 +#define BNXT_FLTR_UPDATE 1 +}; + +#define BNXT_ALL_COPPER_ETHTOOL_SPEED \ + (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | \ + ADVERTISED_10000baseT_Full) + +struct bnxt_link_info { + u8 media_type; + u8 transceiver; + u8 phy_addr; + u8 phy_link_status; +#define BNXT_LINK_NO_LINK PORT_PHY_QCFG_RESP_LINK_NO_LINK +#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL +#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK + u8 wire_speed; + u8 loop_back; + u8 link_up; + u8 duplex; +#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF +#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL + u8 pause; +#define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX +#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX +#define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \ + PORT_PHY_QCFG_RESP_PAUSE_TX) + u8 auto_pause_setting; + u8 force_pause_setting; + u8 duplex_setting; + u8 auto_mode; +#define BNXT_AUTO_MODE(mode) ((mode) > BNXT_LINK_AUTO_NONE && \ + (mode) <= BNXT_LINK_AUTO_MSK) +#define BNXT_LINK_AUTO_NONE PORT_PHY_QCFG_RESP_AUTO_MODE_NONE +#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS +#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED +#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW +#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK +#define PHY_VER_LEN 3 + u8 phy_ver[PHY_VER_LEN]; + u16 link_speed; +#define BNXT_LINK_SPEED_100MB PORT_PHY_QCFG_RESP_LINK_SPEED_100MB +#define BNXT_LINK_SPEED_1GB PORT_PHY_QCFG_RESP_LINK_SPEED_1GB +#define BNXT_LINK_SPEED_2GB PORT_PHY_QCFG_RESP_LINK_SPEED_2GB +#define BNXT_LINK_SPEED_2_5GB PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB +#define BNXT_LINK_SPEED_10GB PORT_PHY_QCFG_RESP_LINK_SPEED_10GB +#define BNXT_LINK_SPEED_20GB PORT_PHY_QCFG_RESP_LINK_SPEED_20GB +#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB +#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB +#define BNXT_LINK_SPEED_50GB 
PORT_PHY_QCFG_RESP_LINK_SPEED_50GB + u16 support_speeds; + u16 auto_link_speeds; +#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB +#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB +#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB +#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB +#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB +#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB +#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB +#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB +#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB + u16 auto_link_speed; + u16 force_link_speed; + u32 preemphasis; + + /* copy of requested setting from ethtool cmd */ + u8 autoneg; +#define BNXT_AUTONEG_SPEED 1 +#define BNXT_AUTONEG_FLOW_CTRL 2 + u8 req_duplex; + u8 req_flow_ctrl; + u16 req_link_speed; + u32 advertising; + bool force_link_chng; + /* a copy of phy_qcfg output used to report link + * info to VF + */ + struct hwrm_port_phy_qcfg_output phy_qcfg_resp; +}; + +#define BNXT_MAX_QUEUE 8 + +struct bnxt_queue_info { + u8 queue_id; + u8 queue_profile; +}; + +struct bnxt { + void __iomem *bar0; + void __iomem *bar1; + void __iomem *bar2; + + u32 reg_base; + + struct net_device *dev; + struct pci_dev *pdev; + + atomic_t intr_sem; + + u32 flags; + #define BNXT_FLAG_DCB_ENABLED 0x1 + #define BNXT_FLAG_VF 0x2 + #define BNXT_FLAG_LRO 0x4 +#ifdef CONFIG_INET + #define BNXT_FLAG_GRO 0x8 +#else + /* Cannot support hardware GRO if CONFIG_INET is not set */ + #define BNXT_FLAG_GRO 0x0 +#endif + #define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO) + #define BNXT_FLAG_JUMBO 0x10 + #define BNXT_FLAG_STRIP_VLAN 0x20 + #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \ + BNXT_FLAG_LRO) + #define BNXT_FLAG_USING_MSIX 0x40 + #define BNXT_FLAG_MSIX_CAP 0x80 + #define BNXT_FLAG_RFS 0x100 + #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ + BNXT_FLAG_RFS | \ + BNXT_FLAG_STRIP_VLAN) + +#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) +#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) + + struct bnxt_napi **bnapi; + + u32 rx_buf_size; + u32 rx_buf_use_size; /* useable size */ + u32 rx_ring_size; + u32 rx_agg_ring_size; + u32 rx_copy_thresh; + u32 rx_ring_mask; + u32 rx_agg_ring_mask; + int rx_nr_pages; + int rx_agg_nr_pages; + int rx_nr_rings; + int rsscos_nr_ctxs; + + u32 tx_ring_size; + u32 tx_ring_mask; + int tx_nr_pages; + int tx_nr_rings; + int tx_nr_rings_per_tc; + + int tx_wake_thresh; + int tx_push_thresh; + int tx_push_size; + + u32 cp_ring_size; + u32 cp_ring_mask; + u32 cp_bit; + int cp_nr_pages; + int cp_nr_rings; + + int num_stat_ctxs; + struct bnxt_ring_grp_info *grp_info; + struct bnxt_vnic_info *vnic_info; + int nr_vnics; + + u8 max_tc; + struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; + + unsigned int current_interval; +#define BNXT_TIMER_INTERVAL (HZ / 2) + + struct timer_list timer; + + int state; +#define BNXT_STATE_CLOSED 0 +#define BNXT_STATE_OPEN 1 + + struct bnxt_irq *irq_tbl; + u8 mac_addr[ETH_ALEN]; + + u32 msg_enable; + + u16 hwrm_cmd_seq; + u32 hwrm_intr_seq_id; + void *hwrm_cmd_resp_addr; + dma_addr_t hwrm_cmd_resp_dma_addr; + void *hwrm_dbg_resp_addr; + dma_addr_t hwrm_dbg_resp_dma_addr; +#define HWRM_DBG_REG_BUF_SIZE 128 + struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ + struct hwrm_ver_get_output ver_resp; +#define FW_VER_STR_LEN 32 +#define BC_HWRM_STR_LEN 21 
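+	/* fw_ver_str is sized so the firmware/HWRM version fits in
+	 * BC_HWRM_STR_LEN bytes, leaving PHY_VER_STR_LEN bytes for the PHY
+	 * version appended by bnxt_probe_phy().
+	 */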
+#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN) + char fw_ver_str[FW_VER_STR_LEN]; + __be16 vxlan_port; + u8 vxlan_port_cnt; + __le16 vxlan_fw_dst_port_id; + u8 nge_port_cnt; + __le16 nge_fw_dst_port_id; + u16 coal_ticks; + u16 coal_ticks_irq; + u16 coal_bufs; + u16 coal_bufs_irq; + +#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) +#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25) + + struct work_struct sp_task; + unsigned long sp_event; +#define BNXT_RX_MASK_SP_EVENT 0 +#define BNXT_RX_NTP_FLTR_SP_EVENT 1 +#define BNXT_LINK_CHNG_SP_EVENT 2 +#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 4 +#define BNXT_VXLAN_ADD_PORT_SP_EVENT 8 +#define BNXT_VXLAN_DEL_PORT_SP_EVENT 16 +#define BNXT_RESET_TASK_SP_EVENT 32 +#define BNXT_RST_RING_SP_EVENT 64 + + struct bnxt_pf_info pf; +#ifdef CONFIG_BNXT_SRIOV + int nr_vfs; + struct bnxt_vf_info vf; + wait_queue_head_t sriov_cfg_wait; + bool sriov_cfg; +#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000) +#endif + +#define BNXT_NTP_FLTR_MAX_FLTR 4096 +#define BNXT_NTP_FLTR_HASH_SIZE 512 +#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1) + struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE]; + spinlock_t ntp_fltr_lock; /* for hash table add, del */ + + unsigned long *ntp_fltr_bmap; + int ntp_fltr_count; + + struct bnxt_link_info link_info; +}; + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +/* called from the NAPI poll routine to get ownership of a bnapi */ +static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) +{ + int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_NAPI); + + return rc == BNXT_STATE_IDLE; +} + +static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +/* called from the busy poll routine to get ownership of a bnapi */ +static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) +{ + int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_POLL); + + return rc == BNXT_STATE_IDLE; +} + +static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) +{ + return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL; +} + +static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) +{ + int old; + + while (1) { + old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_DISABLE); + if (old == BNXT_STATE_IDLE) + break; + usleep_range(500, 5000); + } +} + +#else + +static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) +{ + return true; +} + +static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) +{ + return false; +} + +static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) +{ + return false; +} + +static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) +{ +} + +#endif + +void bnxt_set_ring_params(struct bnxt *); +void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); +int _hwrm_send_message(struct bnxt *, void *, u32, int); +int hwrm_send_message(struct bnxt *, void *, u32, int); +int bnxt_hwrm_set_coal(struct bnxt *); +int bnxt_hwrm_set_pause(struct bnxt *); +int bnxt_hwrm_set_link_setting(struct bnxt *, bool); +int 
bnxt_open_nic(struct bnxt *, bool, bool); +int bnxt_close_nic(struct bnxt *, bool, bool); +void bnxt_get_max_rings(struct bnxt *, int *, int *); +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c new file mode 100644 index 000000000000..45bd628eaf3a --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -0,0 +1,1149 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <linux/ethtool.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/etherdevice.h> +#include <linux/crc32.h> +#include <linux/firmware.h> +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_ethtool.h" +#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */ +#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ +#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100) + +static u32 bnxt_get_msglevel(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + return bp->msg_enable; +} + +static void bnxt_set_msglevel(struct net_device *dev, u32 value) +{ + struct bnxt *bp = netdev_priv(dev); + + bp->msg_enable = value; +} + +static int bnxt_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct bnxt *bp = netdev_priv(dev); + + memset(coal, 0, sizeof(*coal)); + + coal->rx_coalesce_usecs = + max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1); + coal->rx_max_coalesced_frames = bp->coal_bufs / 2; + coal->rx_coalesce_usecs_irq = + max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1); + coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2; + + return 0; +} + +static int bnxt_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs); + bp->coal_bufs = coal->rx_max_coalesced_frames * 2; + bp->coal_ticks_irq = + BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq); + bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2; + + if (netif_running(dev)) + rc = bnxt_hwrm_set_coal(bp); + + return rc; +} + +#define BNXT_NUM_STATS 21 + +static int bnxt_get_sset_count(struct net_device *dev, int sset) +{ + struct bnxt *bp = netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + return BNXT_NUM_STATS * bp->cp_nr_rings; + default: + return -EOPNOTSUPP; + } +} + +static void bnxt_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *buf) +{ + u32 i, j = 0; + struct bnxt *bp = netdev_priv(dev); + u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings; + u32 stat_fields = sizeof(struct ctx_hw_stats) / 8; + + memset(buf, 0, buf_size); + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + __le64 *hw_stats = (__le64 *)cpr->hw_stats; + int k; + + for (k = 0; k < stat_fields; j++, k++) + buf[j] = le64_to_cpu(hw_stats[k]); + buf[j++] = cpr->rx_l4_csum_errors; + } +} + +static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + struct bnxt *bp = netdev_priv(dev); + u32 i; + + switch (stringset) { + /* The number of strings must match BNXT_NUM_STATS defined above. 
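+	 * The 21 strings below are the 20 per-ring hardware counters plus the
+	 * software rx_l4_csum_errors counter.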
*/ + case ETH_SS_STATS: + for (i = 0; i < bp->cp_nr_rings; i++) { + sprintf(buf, "[%d]: rx_ucast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_mcast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_bcast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_discards", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_drops", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_ucast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_mcast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_bcast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_ucast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_mcast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_bcast_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_discards", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_drops", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_ucast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_mcast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tx_bcast_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tpa_packets", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tpa_bytes", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tpa_events", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: tpa_aborts", i); + buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: rx_l4_csum_errors", i); + buf += ETH_GSTRING_LEN; + } + break; + default: + netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n", + stringset); + break; + } +} + +static void bnxt_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bnxt *bp = netdev_priv(dev); + + ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; + ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; + ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT; + + ering->rx_pending = bp->rx_ring_size; + ering->rx_jumbo_pending = bp->rx_agg_ring_size; + ering->tx_pending = bp->tx_ring_size; +} + +static int bnxt_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bnxt *bp = netdev_priv(dev); + + if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) || + (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) || + (ering->tx_pending <= MAX_SKB_FRAGS)) + return -EINVAL; + + if (netif_running(dev)) + bnxt_close_nic(bp, false, false); + + bp->rx_ring_size = ering->rx_pending; + bp->tx_ring_size = ering->tx_pending; + bnxt_set_ring_params(bp); + + if (netif_running(dev)) + return bnxt_open_nic(bp, false, false); + + return 0; +} + +static void bnxt_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct bnxt *bp = netdev_priv(dev); + int max_rx_rings, max_tx_rings, tcs; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + tcs = netdev_get_num_tc(dev); + if (tcs > 1) + max_tx_rings /= tcs; + + channel->max_rx = max_rx_rings; + channel->max_tx = max_tx_rings; + channel->max_other = 0; + channel->max_combined = 0; + channel->rx_count = bp->rx_nr_rings; + channel->tx_count = bp->tx_nr_rings_per_tc; +} + +static int bnxt_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct bnxt *bp = netdev_priv(dev); + int max_rx_rings, max_tx_rings, tcs; + u32 rc = 0; + + if (channel->other_count || channel->combined_count || + !channel->rx_count || !channel->tx_count) + return -EINVAL; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); + tcs = netdev_get_num_tc(dev); + if (tcs > 1) + max_tx_rings /= tcs; + + if 
(channel->rx_count > max_rx_rings || + channel->tx_count > max_tx_rings) + return -EINVAL; + + if (netif_running(dev)) { + if (BNXT_PF(bp)) { + /* TODO CHIMP_FW: Send message to all VF's + * before PF unload + */ + } + rc = bnxt_close_nic(bp, true, false); + if (rc) { + netdev_err(bp->dev, "Set channel failure rc :%x\n", + rc); + return rc; + } + } + + bp->rx_nr_rings = channel->rx_count; + bp->tx_nr_rings_per_tc = channel->tx_count; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + if (tcs > 1) + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; + bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); + bp->num_stat_ctxs = bp->cp_nr_rings; + + if (netif_running(dev)) { + rc = bnxt_open_nic(bp, true, false); + if ((!rc) && BNXT_PF(bp)) { + /* TODO CHIMP_FW: Send message to all VF's + * to renable + */ + } + } + + return rc; +} + +#ifdef CONFIG_RFS_ACCEL +static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + int i, j = 0; + + cmd->data = bp->ntp_fltr_count; + for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + struct bnxt_ntuple_filter *fltr; + + head = &bp->ntp_fltr_hash_tbl[i]; + rcu_read_lock(); + hlist_for_each_entry_rcu(fltr, head, hash) { + if (j == cmd->rule_cnt) + break; + rule_locs[j++] = fltr->sw_id; + } + rcu_read_unlock(); + if (j == cmd->rule_cnt) + break; + } + cmd->rule_cnt = j; + return 0; +} + +static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fs = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct bnxt_ntuple_filter *fltr; + struct flow_keys *fkeys; + int i, rc = -EINVAL; + + if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR) + return rc; + + for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + + head = &bp->ntp_fltr_hash_tbl[i]; + rcu_read_lock(); + hlist_for_each_entry_rcu(fltr, head, hash) { + if (fltr->sw_id == fs->location) + goto fltr_found; + } + rcu_read_unlock(); + } + return rc; + +fltr_found: + fkeys = &fltr->fkeys; + if (fkeys->basic.ip_proto == IPPROTO_TCP) + fs->flow_type = TCP_V4_FLOW; + else if (fkeys->basic.ip_proto == IPPROTO_UDP) + fs->flow_type = UDP_V4_FLOW; + else + goto fltr_err; + + fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; + fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); + + fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; + fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); + + fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); + + fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + + fs->ring_cookie = fltr->rxq; + rc = 0; + +fltr_err: + rcu_read_unlock(); + + return rc; +} + +static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = bp->rx_nr_rings; + break; + + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bp->ntp_fltr_count; + cmd->data = BNXT_NTP_FLTR_MAX_FLTR; + break; + + case ETHTOOL_GRXCLSRLALL: + rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs); + break; + + case ETHTOOL_GRXCLSRULE: + rc = bnxt_grxclsrule(bp, cmd); + break; + + default: + rc = -EOPNOTSUPP; + break; + } + + return rc; +} +#endif + +static u32 bnxt_get_rxfh_indir_size(struct net_device *dev) +{ + return HW_HASH_INDEX_SIZE; +} + +static u32 bnxt_get_rxfh_key_size(struct net_device *dev) +{ + return HW_HASH_KEY_SIZE; +} + +static int bnxt_get_rxfh(struct net_device 
*dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + int i = 0; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (indir) + for (i = 0; i < HW_HASH_INDEX_SIZE; i++) + indir[i] = le16_to_cpu(vnic->rss_table[i]); + + if (key) + memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); + + return 0; +} + +static void bnxt_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct bnxt *bp = netdev_priv(dev); + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); + strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); + info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + info->testinfo_len = BNXT_NUM_TESTS(bp); + /* TODO CHIMP_FW: eeprom dump details */ + info->eedump_len = 0; + /* TODO CHIMP FW: reg dump details */ + info->regdump_len = 0; +} + +static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->support_speeds; + u32 speed_mask = 0; + + if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) + speed_mask |= SUPPORTED_100baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) + speed_mask |= SUPPORTED_1000baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) + speed_mask |= SUPPORTED_2500baseX_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) + speed_mask |= SUPPORTED_10000baseT_Full; + /* TODO: support 25GB, 50GB with different cable type */ + if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB) + speed_mask |= SUPPORTED_20000baseMLD2_Full | + SUPPORTED_20000baseKR2_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) + speed_mask |= SUPPORTED_40000baseKR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseLR4_Full; + + return speed_mask; +} + +static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->auto_link_speeds; + u32 speed_mask = 0; + + /* TODO: support 25GB, 40GB, 50GB with different cable type */ + /* set the advertised speeds */ + if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) + speed_mask |= ADVERTISED_100baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) + speed_mask |= ADVERTISED_1000baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) + speed_mask |= ADVERTISED_2500baseX_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) + speed_mask |= ADVERTISED_10000baseT_Full; + /* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/ + if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB) + speed_mask |= ADVERTISED_20000baseMLD2_Full | + ADVERTISED_20000baseKR2_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) + speed_mask |= ADVERTISED_40000baseKR4_Full | + ADVERTISED_40000baseCR4_Full | + ADVERTISED_40000baseSR4_Full | + ADVERTISED_40000baseLR4_Full; + return speed_mask; +} + +u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) +{ + switch (fw_link_speed) { + case BNXT_LINK_SPEED_100MB: + return SPEED_100; + case BNXT_LINK_SPEED_1GB: + return SPEED_1000; + case BNXT_LINK_SPEED_2_5GB: + return SPEED_2500; + case BNXT_LINK_SPEED_10GB: + return SPEED_10000; + case BNXT_LINK_SPEED_20GB: + return SPEED_20000; + case BNXT_LINK_SPEED_25GB: + return SPEED_25000; + case BNXT_LINK_SPEED_40GB: + return SPEED_40000; + case BNXT_LINK_SPEED_50GB: + return SPEED_50000; + default: + return SPEED_UNKNOWN; + } +} + +static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnxt *bp = 
netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + u16 ethtool_speed; + + cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); + + if (link_info->auto_link_speeds) + cmd->supported |= SUPPORTED_Autoneg; + + if (BNXT_AUTO_MODE(link_info->auto_mode)) { + cmd->advertising = + bnxt_fw_to_ethtool_advertised_spds(link_info); + cmd->advertising |= ADVERTISED_Autoneg; + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->autoneg = AUTONEG_DISABLE; + cmd->advertising = 0; + } + if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { + if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == + BNXT_LINK_PAUSE_BOTH) { + cmd->advertising |= ADVERTISED_Pause; + cmd->supported |= SUPPORTED_Pause; + } else { + cmd->advertising |= ADVERTISED_Asym_Pause; + cmd->supported |= SUPPORTED_Asym_Pause; + if (link_info->auto_pause_setting & + BNXT_LINK_PAUSE_RX) + cmd->advertising |= ADVERTISED_Pause; + } + } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { + if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) == + BNXT_LINK_PAUSE_BOTH) { + cmd->supported |= SUPPORTED_Pause; + } else { + cmd->supported |= SUPPORTED_Asym_Pause; + if (link_info->force_pause_setting & + BNXT_LINK_PAUSE_RX) + cmd->supported |= SUPPORTED_Pause; + } + } + + cmd->port = PORT_NONE; + if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + cmd->port = PORT_TP; + cmd->supported |= SUPPORTED_TP; + cmd->advertising |= ADVERTISED_TP; + } else { + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + + if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) + cmd->port = PORT_DA; + else if (link_info->media_type == + PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) + cmd->port = PORT_FIBRE; + } + + if (link_info->phy_link_status == BNXT_LINK_LINK) { + if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; + } else { + cmd->duplex = DUPLEX_UNKNOWN; + } + ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); + ethtool_cmd_speed_set(cmd, ethtool_speed); + if (link_info->transceiver == + PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL) + cmd->transceiver = XCVR_INTERNAL; + else + cmd->transceiver = XCVR_EXTERNAL; + cmd->phy_address = link_info->phy_addr; + + return 0; +} + +static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed) +{ + switch (ethtool_speed) { + case SPEED_100: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB; + case SPEED_1000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB; + case SPEED_2500: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB; + case SPEED_10000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB; + case SPEED_20000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB; + case SPEED_25000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB; + case SPEED_40000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB; + case SPEED_50000: + return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB; + default: + netdev_err(dev, "unsupported speed!\n"); + break; + } + return 0; +} + +static u16 bnxt_get_fw_auto_link_speeds(u32 advertising) +{ + u16 fw_speed_mask = 0; + + /* only support autoneg at speed 100, 1000, and 10000 */ + if (advertising & (ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half)) { + fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB; + } + if (advertising & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half)) { + fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB; + } + if (advertising & ADVERTISED_10000baseT_Full) + fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; + + return fw_speed_mask; +} + +static int 
bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + int rc = 0; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + u32 speed, fw_advertising = 0; + bool set_pause = false; + + if (BNXT_VF(bp)) + return rc; + + if (cmd->autoneg == AUTONEG_ENABLE) { + if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + netdev_err(dev, "Media type doesn't support autoneg\n"); + rc = -EINVAL; + goto set_setting_exit; + } + if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED | + ADVERTISED_Autoneg | + ADVERTISED_TP | + ADVERTISED_Pause | + ADVERTISED_Asym_Pause)) { + netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", + cmd->advertising); + rc = -EINVAL; + goto set_setting_exit; + } + fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising); + if (fw_advertising & ~link_info->support_speeds) { + netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n", + cmd->advertising); + rc = -EINVAL; + goto set_setting_exit; + } + link_info->autoneg |= BNXT_AUTONEG_SPEED; + if (!fw_advertising) + link_info->advertising = link_info->support_speeds; + else + link_info->advertising = fw_advertising; + /* any change to autoneg will cause link change, therefore the + * driver should put back the original pause setting in autoneg + */ + set_pause = true; + } else { + /* TODO: currently don't support half duplex */ + if (cmd->duplex == DUPLEX_HALF) { + netdev_err(dev, "HALF DUPLEX is not supported!\n"); + rc = -EINVAL; + goto set_setting_exit; + } + /* If received a request for an unknown duplex, assume full*/ + if (cmd->duplex == DUPLEX_UNKNOWN) + cmd->duplex = DUPLEX_FULL; + speed = ethtool_cmd_speed(cmd); + link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); + link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; + link_info->autoneg &= ~BNXT_AUTONEG_SPEED; + link_info->advertising = 0; + } + + if (netif_running(dev)) + rc = bnxt_hwrm_set_link_setting(bp, set_pause); + +set_setting_exit: + return rc; +} + +static void bnxt_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_VF(bp)) + return; + epause->autoneg = !!(link_info->auto_pause_setting & + BNXT_LINK_PAUSE_BOTH); + epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0); + epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0); +} + +static int bnxt_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + int rc = 0; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_VF(bp)) + return rc; + + if (epause->autoneg) { + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; + } else { + /* when transition from auto pause to force pause, + * force a link change + */ + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + link_info->force_link_chng = true; + link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; + link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH; + } + if (epause->rx_pause) + link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; + else + link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX; + + if (epause->tx_pause) + link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; + else + link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX; + + if (netif_running(dev)) + rc = bnxt_hwrm_set_pause(bp); + return rc; +} + +static u32 bnxt_get_link(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + /* TODO: 
handle MF, VF, driver close case */ + return bp->link_info.link_up; +} + +static int bnxt_flash_nvram(struct net_device *dev, + u16 dir_type, + u16 dir_ordinal, + u16 dir_ext, + u16 dir_attr, + const u8 *data, + size_t data_len) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + struct hwrm_nvm_write_input req = {0}; + dma_addr_t dma_handle; + u8 *kmem; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1); + + req.dir_type = cpu_to_le16(dir_type); + req.dir_ordinal = cpu_to_le16(dir_ordinal); + req.dir_ext = cpu_to_le16(dir_ext); + req.dir_attr = cpu_to_le16(dir_attr); + req.dir_data_length = cpu_to_le32(data_len); + + kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle, + GFP_KERNEL); + if (!kmem) { + netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", + (unsigned)data_len); + return -ENOMEM; + } + memcpy(kmem, data, data_len); + req.host_src_addr = cpu_to_le64(dma_handle); + + rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); + dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); + + return rc; +} + +static int bnxt_flash_firmware(struct net_device *dev, + u16 dir_type, + const u8 *fw_data, + size_t fw_size) +{ + int rc = 0; + u16 code_type; + u32 stored_crc; + u32 calculated_crc; + struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data; + + switch (dir_type) { + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + code_type = CODE_BOOT; + break; + default: + netdev_err(dev, "Unsupported directory entry type: %u\n", + dir_type); + return -EINVAL; + } + if (fw_size < sizeof(struct bnxt_fw_header)) { + netdev_err(dev, "Invalid firmware file size: %u\n", + (unsigned int)fw_size); + return -EINVAL; + } + if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) { + netdev_err(dev, "Invalid firmware signature: %08X\n", + le32_to_cpu(header->signature)); + return -EINVAL; + } + if (header->code_type != code_type) { + netdev_err(dev, "Expected firmware type: %d, read: %d\n", + code_type, header->code_type); + return -EINVAL; + } + if (header->device != DEVICE_CUMULUS_FAMILY) { + netdev_err(dev, "Expected firmware device family %d, read: %d\n", + DEVICE_CUMULUS_FAMILY, header->device); + return -EINVAL; + } + /* Confirm the CRC32 checksum of the file: */ + stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - + sizeof(stored_crc))); + calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); + if (calculated_crc != stored_crc) { + netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n", + (unsigned long)stored_crc, + (unsigned long)calculated_crc); + return -EINVAL; + } + /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */ + rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, + 0, 0, fw_data, fw_size); + if (rc == 0) { /* Firmware update successful */ + /* TODO: Notify processor it needs to reset itself + */ + } + return rc; +} + +static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_AVS: + case BNX_DIR_TYPE_EXP_ROM_MBA: + case BNX_DIR_TYPE_PCIE: + case BNX_DIR_TYPE_TSCF_UCODE: + case BNX_DIR_TYPE_EXT_PHY: + case BNX_DIR_TYPE_CCM: + 
case BNX_DIR_TYPE_ISCSI_BOOT: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_executable(u16 dir_type) +{ + return bnxt_dir_type_is_ape_bin_format(dir_type) || + bnxt_dir_type_is_unprotected_exec_format(dir_type); +} + +static int bnxt_flash_firmware_from_file(struct net_device *dev, + u16 dir_type, + const char *filename) +{ + const struct firmware *fw; + int rc; + + if (bnxt_dir_type_is_executable(dir_type) == false) + return -EINVAL; + + rc = request_firmware(&fw, filename, &dev->dev); + if (rc != 0) { + netdev_err(dev, "Error %d requesting firmware file: %s\n", + rc, filename); + return rc; + } + if (bnxt_dir_type_is_ape_bin_format(dir_type) == true) + rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size); + else + rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, + 0, 0, fw->data, fw->size); + release_firmware(fw); + return rc; +} + +static int bnxt_flash_package_from_file(struct net_device *dev, + char *filename) +{ + netdev_err(dev, "packages are not yet supported\n"); + return -EINVAL; +} + +static int bnxt_flash_device(struct net_device *dev, + struct ethtool_flash *flash) +{ + if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) { + netdev_err(dev, "flashdev not supported from a virtual function\n"); + return -EINVAL; + } + + if (flash->region == ETHTOOL_FLASH_ALL_REGIONS) + return bnxt_flash_package_from_file(dev, flash->data); + + return bnxt_flash_firmware_from_file(dev, flash->region, flash->data); +} + +static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + struct hwrm_nvm_get_dir_info_input req = {0}; + struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + *entries = le32_to_cpu(output->entries); + *length = le32_to_cpu(output->entry_length); + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_get_eeprom_len(struct net_device *dev) +{ + /* The -1 return value allows the entire 32-bit range of offsets to be + * passed via the ethtool command-line utility. + */ + return -1; +} + +static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + u32 dir_entries; + u32 entry_length; + u8 *buf; + size_t buflen; + dma_addr_t dma_handle; + struct hwrm_nvm_get_dir_entries_input req = {0}; + + rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); + if (rc != 0) + return rc; + + /* Insert 2 bytes of directory info (count and size of entries) */ + if (len < 2) + return -EINVAL; + + *data++ = dir_entries; + *data++ = entry_length; + len -= 2; + memset(data, 0xff, len); + + buflen = dir_entries * entry_length; + buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle, + GFP_KERNEL); + if (!buf) { + netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", + (unsigned)buflen); + return -ENOMEM; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1); + req.host_dest_addr = cpu_to_le64(dma_handle); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == 0) + memcpy(data, buf, len > buflen ? 
buflen : len); + dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle); + return rc; +} + +static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, + u32 length, u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + u8 *buf; + dma_addr_t dma_handle; + struct hwrm_nvm_read_input req = {0}; + + buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, + GFP_KERNEL); + if (!buf) { + netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", + (unsigned)length); + return -ENOMEM; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1); + req.host_dest_addr = cpu_to_le64(dma_handle); + req.dir_idx = cpu_to_le16(index); + req.offset = cpu_to_le32(offset); + req.len = cpu_to_le32(length); + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == 0) + memcpy(data, buf, length); + dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle); + return rc; +} + +static int bnxt_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + u32 index; + u32 offset; + + if (eeprom->offset == 0) /* special offset value to get directory */ + return bnxt_get_nvram_directory(dev, eeprom->len, data); + + index = eeprom->offset >> 24; + offset = eeprom->offset & 0xffffff; + + if (index == 0) { + netdev_err(dev, "unsupported index value: %d\n", index); + return -EINVAL; + } + + return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data); +} + +static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_nvm_erase_dir_entry_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1); + req.dir_idx = cpu_to_le16(index); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + u8 index, dir_op; + u16 type, ext, ordinal, attr; + + if (!BNXT_PF(bp)) { + netdev_err(dev, "NVM write not supported from a virtual function\n"); + return -EINVAL; + } + + type = eeprom->magic >> 16; + + if (type == 0xffff) { /* special value for directory operations */ + index = eeprom->magic & 0xff; + dir_op = eeprom->magic >> 8; + if (index == 0) + return -EINVAL; + switch (dir_op) { + case 0x0e: /* erase */ + if (eeprom->offset != ~eeprom->magic) + return -EINVAL; + return bnxt_erase_nvram_directory(dev, index - 1); + default: + return -EINVAL; + } + } + + /* Create or re-write an NVM item: */ + if (bnxt_dir_type_is_executable(type) == true) + return -EINVAL; + ext = eeprom->magic & 0xffff; + ordinal = eeprom->offset >> 16; + attr = eeprom->offset & 0xffff; + + return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data, + eeprom->len); +} + +const struct ethtool_ops bnxt_ethtool_ops = { + .get_settings = bnxt_get_settings, + .set_settings = bnxt_set_settings, + .get_pauseparam = bnxt_get_pauseparam, + .set_pauseparam = bnxt_set_pauseparam, + .get_drvinfo = bnxt_get_drvinfo, + .get_coalesce = bnxt_get_coalesce, + .set_coalesce = bnxt_set_coalesce, + .get_msglevel = bnxt_get_msglevel, + .set_msglevel = bnxt_set_msglevel, + .get_sset_count = bnxt_get_sset_count, + .get_strings = bnxt_get_strings, + .get_ethtool_stats = bnxt_get_ethtool_stats, + .set_ringparam = bnxt_set_ringparam, + .get_ringparam = bnxt_get_ringparam, + .get_channels = bnxt_get_channels, + .set_channels = bnxt_set_channels, +#ifdef CONFIG_RFS_ACCEL + .get_rxnfc = bnxt_get_rxnfc, +#endif + .get_rxfh_indir_size = 
bnxt_get_rxfh_indir_size, + .get_rxfh_key_size = bnxt_get_rxfh_key_size, + .get_rxfh = bnxt_get_rxfh, + .flash_device = bnxt_flash_device, + .get_eeprom_len = bnxt_get_eeprom_len, + .get_eeprom = bnxt_get_eeprom, + .set_eeprom = bnxt_set_eeprom, + .get_link = bnxt_get_link, +}; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h new file mode 100644 index 000000000000..98fa81e08b58 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -0,0 +1,17 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_ETHTOOL_H +#define BNXT_ETHTOOL_H + +extern const struct ethtool_ops bnxt_ethtool_ops; + +u32 bnxt_fw_to_ethtool_speed(u16); + +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h new file mode 100644 index 000000000000..e0aac65c6d82 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h @@ -0,0 +1,104 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef __BNXT_FW_HDR_H__ +#define __BNXT_FW_HDR_H__ + +#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */ + +enum SUPPORTED_FAMILY { + DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */ + DEVICE_5705_FAMILY, /* 1 - Bachelor */ + DEVICE_SHASTA_FAMILY, /* 2 - 5751 */ + DEVICE_5706_FAMILY, /* 3 - Teton */ + DEVICE_5714_FAMILY, /* 4 - Hamilton */ + DEVICE_STANFORD_FAMILY, /* 5 - 5755 */ + DEVICE_STANFORD_ME_FAMILY, /* 6 - 5756 */ + DEVICE_SOLEDAD_FAMILY, /* 7 - 5761[E] */ + DEVICE_CILAI_FAMILY, /* 8 - 57780/60/90/91 */ + DEVICE_ASPEN_FAMILY, /* 9 - 57781/85/61/65/91/95 */ + DEVICE_ASPEN_PLUS_FAMILY, /* 10 - 57786 */ + DEVICE_LOGAN_FAMILY, /* 11 - Any device in the Logan family + */ + DEVICE_LOGAN_5762, /* 12 - Logan Enterprise (aka Columbia) + */ + DEVICE_LOGAN_57767, /* 13 - Logan Client */ + DEVICE_LOGAN_57787, /* 14 - Logan Consumer */ + DEVICE_LOGAN_5725, /* 15 - Logan Server (TruManage-enabled) + */ + DEVICE_SAWTOOTH_FAMILY, /* 16 - 5717/18 */ + DEVICE_COTOPAXI_FAMILY, /* 17 - 5719 */ + DEVICE_SNAGGLETOOTH_FAMILY, /* 18 - 5720 */ + DEVICE_CUMULUS_FAMILY, /* 19 - Cumulus/Whitney */ + MAX_DEVICE_FAMILY +}; + +enum SUPPORTED_CODE { + CODE_ASF1, /* 0 - ASF VERSION 1.03 <deprecated> */ + CODE_ASF2, /* 1 - ASF VERSION 2.00 <deprecated> */ + CODE_PASSTHRU, /* 2 - PassThru <deprecated> */ + CODE_PT_SEC, /* 3 - PassThru with security <deprecated> */ + CODE_UMP, /* 4 - UMP <deprecated> */ + CODE_BOOT, /* 5 - Bootcode */ + CODE_DASH, /* 6 - TruManage (DASH + ASF + PMCI) + * Management firmwares + */ + CODE_MCTP_PASSTHRU, /* 7 - NCSI / MCTP Passt-hrough firmware */ + CODE_PM_OFFLOAD, /* 8 - Power-Management Proxy Offload firmwares + */ + CODE_MDNS_SD_OFFLOAD, /* 9 - Multicast DNS Service Discovery Proxys + * Offload firmware + */ + CODE_DISC_OFFLOAD, /* 10 - Discovery Offload firmware */ + CODE_MUSTANG, /* 11 - I2C Error reporting APE firmwares + * <deprecated> + */ + CODE_ARP_BATCH, /* 12 - ARP Batch firmware */ + CODE_SMASH, /* 13 - TruManage (SMASH + DCMI/IPMI + PMCI) + * Management firmware + */ + 
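Note on the flashing path above: bnxt_flash_firmware() accepts an image only if it begins with the bnxt_fw_header defined in this file (signature "BCM"+0x1a, matching code_type and device family) and if the little-endian CRC-32 stored in the image's last four bytes matches the checksum of everything before them. The following is a minimal user-space sketch of that same validation, not driver code; it assumes zlib's crc32() (the kernel's ~crc32(~0, ...) computes the same standard CRC-32) and a little-endian host.

/* Illustrative user-space check of a bnxt firmware image trailer. */
#include <stdint.h>
#include <string.h>
#include <zlib.h>

#define DEMO_BNXT_FW_SIGNATURE 0x1a4d4342	/* "BCM" + 0x1a, as above */

struct demo_fw_header {			/* mirrors struct bnxt_fw_header above */
	uint32_t signature;		/* little-endian on disk */
	uint8_t  flags;
	uint8_t  code_type;		/* enum SUPPORTED_CODE */
	uint8_t  device;		/* enum SUPPORTED_FAMILY */
	uint8_t  media;			/* enum SUPPORTED_MEDIA */
	uint8_t  version[16];
	uint8_t  build, revision, minor_ver, major_ver;
};

static int demo_fw_image_ok(const uint8_t *data, size_t len,
			    uint8_t want_code, uint8_t want_dev)
{
	struct demo_fw_header hdr;
	uint32_t stored, calc;

	if (len < sizeof(hdr) + 4)
		return 0;
	memcpy(&hdr, data, sizeof(hdr));
	if (hdr.signature != DEMO_BNXT_FW_SIGNATURE)
		return 0;
	if (hdr.code_type != want_code || hdr.device != want_dev)
		return 0;
	/* last 4 bytes: little-endian CRC-32 of everything before them */
	memcpy(&stored, data + len - 4, 4);
	calc = (uint32_t)crc32(0, data, len - 4);
	return stored == calc;
}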
CODE_APE_DIAG, /* 14 - APE Test Diag firmware */ + CODE_APE_PATCH, /* 15 - APE Patch firmware */ + CODE_TANG_PATCH, /* 16 - TANG Patch firmware */ + CODE_KONG_FW, /* 17 - KONG firmware */ + CODE_KONG_PATCH, /* 18 - KONG Patch firmware */ + CODE_BONO_FW, /* 19 - BONO firmware */ + CODE_BONO_PATCH, /* 20 - BONO Patch firmware */ + + MAX_CODE_TYPE, +}; + +enum SUPPORTED_MEDIA { + MEDIA_COPPER, /* 0 */ + MEDIA_FIBER, /* 1 */ + MEDIA_NONE, /* 2 */ + MEDIA_COPPER_FIBER, /* 3 */ + MAX_MEDIA_TYPE, +}; + +struct bnxt_fw_header { + __le32 signature; /* constains the constant value of + * BNXT_Firmware_Bin_Signatures + */ + u8 flags; /* reserved for ChiMP use */ + u8 code_type; /* enum SUPPORTED_CODE */ + u8 device; /* enum SUPPORTED_FAMILY */ + u8 media; /* enum SUPPORTED_MEDIA */ + u8 version[16]; /* the null terminated version string to + * indicate the version of the + * file, this will be copied from the binary + * file version string + */ + u8 build; + u8 revision; + u8 minor_ver; + u8 major_ver; +}; + +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h new file mode 100644 index 000000000000..70fc8253c07f --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -0,0 +1,4046 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_HSI_H +#define BNXT_HSI_H + +/* per-context HW statistics -- chip view */ +struct ctx_hw_stats { + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 tpa_pkts; + __le64 tpa_bytes; + __le64 tpa_events; + __le64 tpa_aborts; +}; + +/* Statistics Ejection Buffer Completion Record (16 bytes) */ +struct eject_cmpl { + __le16 type; + #define EJECT_CMPL_TYPE_MASK 0x3fUL + #define EJECT_CMPL_TYPE_SFT 0 + #define EJECT_CMPL_TYPE_STAT_EJECT (0x1aUL << 0) + __le16 len; + __le32 opaque; + __le32 v; + #define EJECT_CMPL_V 0x1UL + __le32 unused_2; +}; + +/* HWRM Completion Record (16 bytes) */ +struct hwrm_cmpl { + __le16 type; + #define HWRM_CMPL_TYPE_MASK 0x3fUL + #define HWRM_CMPL_TYPE_SFT 0 + #define HWRM_CMPL_TYPE_HWRM_DONE (0x20UL << 0) + __le16 sequence_id; + __le32 unused_1; + __le32 v; + #define HWRM_CMPL_V 0x1UL + __le32 unused_3; +}; + +/* HWRM Forwarded Request (16 bytes) */ +struct hwrm_fwd_req_cmpl { + __le16 req_len_type; + #define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL + #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0 + #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (0x22UL << 0) + #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL + #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6 + __le16 source_id; + __le32 unused_0; + __le32 req_buf_addr_v[2]; + #define HWRM_FWD_REQ_CMPL_V 0x1UL + #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL + #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 +}; + +/* HWRM Forwarded Response (16 bytes) */ +struct hwrm_fwd_resp_cmpl { + __le16 type; + #define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL + #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0 + #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP (0x24UL << 0) + __le16 
source_id; + __le16 resp_len; + __le16 unused_1; + __le32 resp_buf_addr_v[2]; + #define HWRM_FWD_RESP_CMPL_V 0x1UL + #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL + #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 +}; + +/* HWRM Asynchronous Event Completion Record (16 bytes) */ +struct hwrm_async_event_cmpl { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; +}; + +/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */ +struct hwrm_async_event_cmpl_link_status_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL +}; + +/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */ +struct hwrm_async_event_cmpl_link_mtu_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */ +struct hwrm_async_event_cmpl_link_speed_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL + #define 
HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1 + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 +}; + +/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */ +struct hwrm_async_event_cmpl_dcb_config_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */ +struct hwrm_async_event_cmpl_port_conn_not_allowed { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 + u8 unused_1[3]; + 
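For reference, the per-event completion records in this header all share one shape: a 16-bit type (0x2e for async events), a 16-bit event_id, an opaque/valid byte, and event-specific data words that are unpacked with the _MASK/_SFT pairs defined alongside them. Below is a short illustrative decoder for two of the events, assuming this header plus the usual kernel byte-order helpers are available; the function itself is hypothetical and not part of the driver.

/* Illustrative only: decode two async event completions using the masks above. */
static void demo_decode_async_event(const struct hwrm_async_event_cmpl *cmpl)
{
	u16 id = le16_to_cpu(cmpl->event_id);
	u32 data1 = le32_to_cpu(cmpl->event_data1);

	if (id == HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE) {
		/* bit 0 of event_data1 carries the new link state */
		pr_info("link %s\n",
			(data1 & HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP) ?
			"up" : "down");
	} else if (id == HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE) {
		/* bits 15:1 carry the new speed in 100 Mbps units, bits 31:16 the port id */
		u32 spd = (data1 & HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK) >>
			  HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT;
		u32 port = (data1 & HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
			   HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT;

		pr_info("port %u: new link speed %u Mbps\n", port, spd * 100);
	}
}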
__le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */ +struct hwrm_async_event_cmpl_func_drvr_unload { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */ +struct hwrm_async_event_cmpl_func_drvr_load { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */ +struct hwrm_async_event_cmpl_pf_drvr_unload { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */ +struct hwrm_async_event_cmpl_pf_drvr_load { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define 
HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */ +struct hwrm_async_event_cmpl_vf_flr { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR (0x30UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */ +struct hwrm_async_event_cmpl_vf_mac_addr_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */ +struct hwrm_async_event_cmpl_hwrm_error { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0) + __le32 event_data2; + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0) + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 + u8 unused_1[3]; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL +}; + +/* HW Resource Manager Specification 0.7.8 */ +#define HWRM_VERSION_MAJOR 0 +#define HWRM_VERSION_MINOR 7 +#define HWRM_VERSION_UPDATE 8 + +#define HWRM_VERSION_STR "0.7.8" +/* Following is the signature for HWRM message field that indicates not + * applicable (All F's). Need to cast it the size of the field if needed. + */ +#define HWRM_NA_SIGNATURE ((__le32)(-1)) +#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */ +#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */ +#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. 
*/ +#define HW_HASH_KEY_SIZE 40 +#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */ +/* Input (16 bytes) */ +struct input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (8 bytes) */ +struct output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; +}; + +/* Command numbering (8 bytes) */ +struct cmd_nums { + __le16 req_type; + #define HWRM_VER_GET (0x0UL) + #define HWRM_FUNC_DISABLE (0x10UL) + #define HWRM_FUNC_RESET (0x11UL) + #define HWRM_FUNC_GETFID (0x12UL) + #define HWRM_FUNC_VF_ALLOC (0x13UL) + #define HWRM_FUNC_VF_FREE (0x14UL) + #define HWRM_FUNC_QCAPS (0x15UL) + #define HWRM_FUNC_QCFG (0x16UL) + #define HWRM_FUNC_CFG (0x17UL) + #define HWRM_FUNC_QSTATS (0x18UL) + #define HWRM_FUNC_CLR_STATS (0x19UL) + #define HWRM_FUNC_DRV_UNRGTR (0x1aUL) + #define HWRM_FUNC_VF_RESC_FREE (0x1bUL) + #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL) + #define HWRM_FUNC_DRV_RGTR (0x1dUL) + #define HWRM_FUNC_DRV_QVER (0x1eUL) + #define HWRM_FUNC_BUF_RGTR (0x1fUL) + #define HWRM_FUNC_VF_CFG (0x20UL) + #define HWRM_PORT_PHY_CFG (0x20UL) + #define HWRM_PORT_MAC_CFG (0x21UL) + #define HWRM_PORT_ENABLE (0x22UL) + #define HWRM_PORT_QSTATS (0x23UL) + #define HWRM_PORT_LPBK_QSTATS (0x24UL) + #define HWRM_PORT_CLR_STATS (0x25UL) + #define HWRM_PORT_LPBK_CLR_STATS (0x26UL) + #define HWRM_PORT_PHY_QCFG (0x27UL) + #define HWRM_PORT_MAC_QCFG (0x28UL) + #define HWRM_PORT_BLINK_LED (0x29UL) + #define HWRM_QUEUE_QPORTCFG (0x30UL) + #define HWRM_QUEUE_QCFG (0x31UL) + #define HWRM_QUEUE_CFG (0x32UL) + #define HWRM_QUEUE_BUFFERS_QCFG (0x33UL) + #define HWRM_QUEUE_BUFFERS_CFG (0x34UL) + #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL) + #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL) + #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL) + #define HWRM_QUEUE_PRI2COS_CFG (0x38UL) + #define HWRM_QUEUE_COS2BW_QCFG (0x39UL) + #define HWRM_QUEUE_COS2BW_CFG (0x3aUL) + #define HWRM_VNIC_ALLOC (0x40UL) + #define HWRM_VNIC_FREE (0x41UL) + #define HWRM_VNIC_CFG (0x42UL) + #define HWRM_VNIC_QCFG (0x43UL) + #define HWRM_VNIC_TPA_CFG (0x44UL) + #define HWRM_VNIC_TPA_QCFG (0x45UL) + #define HWRM_VNIC_RSS_CFG (0x46UL) + #define HWRM_VNIC_RSS_QCFG (0x47UL) + #define HWRM_VNIC_PLCMODES_CFG (0x48UL) + #define HWRM_VNIC_PLCMODES_QCFG (0x49UL) + #define HWRM_RING_ALLOC (0x50UL) + #define HWRM_RING_FREE (0x51UL) + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL) + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL) + #define HWRM_RING_RESET (0x5eUL) + #define HWRM_RING_GRP_ALLOC (0x60UL) + #define HWRM_RING_GRP_FREE (0x61UL) + #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL) + #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL) + #define HWRM_ARB_GRP_ALLOC (0x80UL) + #define HWRM_ARB_GRP_CFG (0x81UL) + #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL) + #define HWRM_CFA_L2_FILTER_FREE (0x91UL) + #define HWRM_CFA_L2_FILTER_CFG (0x92UL) + #define HWRM_CFA_L2_SET_RX_MASK (0x93UL) + #define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING (0x94UL) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL) + #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL) + #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL) + #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL) + #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL) + #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL) + #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL) + #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL) + #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL) + #define HWRM_STAT_CTX_ALLOC (0xb0UL) + #define HWRM_STAT_CTX_FREE (0xb1UL) + 
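Every HWRM message begins with the common 16-byte struct input defined just above (a req_type taken from the command list below, a completion ring id, a sequence id, a target function id, and the DMA address of the response buffer), and the per-command response structures end with a valid byte. The sketch below shows how such a header is plausibly filled for the bnxt_hwrm_cmd_hdr_init(bp, &req, msg, -1, -1) calls seen in the ethtool code earlier in this patch; the helper and the meaning attached to the -1 values are assumptions, only the field names come from this header.

/* Hypothetical sketch, not the driver's bnxt_hwrm_cmd_hdr_init(). */
static void demo_hwrm_hdr_init(struct input *req, u16 req_type,
			       u16 cmpl_ring, u16 target_id,
			       u16 seq_id, dma_addr_t resp_dma)
{
	req->req_type = cpu_to_le16(req_type);	/* e.g. HWRM_NVM_GET_DIR_INFO */
	req->cmpl_ring = cpu_to_le16(cmpl_ring);	/* (u16)-1: assumed "no completion ring" */
	req->seq_id = cpu_to_le16(seq_id);
	req->target_id = cpu_to_le16(target_id);	/* (u16)-1: assumed "this function" */
	req->resp_addr = cpu_to_le64(resp_dma);	/* DMA address of the response buffer */
}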
#define HWRM_STAT_CTX_QUERY (0xb2UL) + #define HWRM_STAT_CTX_CLR_STATS (0xb3UL) + #define HWRM_FW_RESET (0xc0UL) + #define HWRM_FW_QSTATUS (0xc1UL) + #define HWRM_EXEC_FWD_RESP (0xd0UL) + #define HWRM_REJECT_FWD_RESP (0xd1UL) + #define HWRM_FWD_RESP (0xd2UL) + #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL) + #define HWRM_TEMP_MONITOR_QUERY (0xe0UL) + #define HWRM_MGMT_L2_FILTER_ALLOC (0x100UL) + #define HWRM_MGMT_L2_FILTER_FREE (0x101UL) + #define HWRM_DBG_READ_DIRECT (0xff10UL) + #define HWRM_DBG_READ_INDIRECT (0xff11UL) + #define HWRM_DBG_WRITE_DIRECT (0xff12UL) + #define HWRM_DBG_WRITE_INDIRECT (0xff13UL) + #define HWRM_DBG_DUMP (0xff14UL) + #define HWRM_NVM_MODIFY (0xfff4UL) + #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL) + #define HWRM_NVM_GET_DEV_INFO (0xfff6UL) + #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL) + #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL) + #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL) + #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL) + #define HWRM_NVM_GET_DIR_INFO (0xfffbUL) + #define HWRM_NVM_RAW_DUMP (0xfffcUL) + #define HWRM_NVM_READ (0xfffdUL) + #define HWRM_NVM_WRITE (0xfffeUL) + #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL) + __le16 unused_0[3]; +}; + +/* Return Codes (8 bytes) */ +struct ret_codes { + __le16 error_code; + #define HWRM_ERR_CODE_SUCCESS (0x0UL) + #define HWRM_ERR_CODE_FAIL (0x1UL) + #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL) + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL) + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL) + #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL) + #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL) + #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL) + #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL) + #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL) + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_err_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 opaque_0; + __le16 opaque_1; + u8 opaque_2; + u8 valid; +}; + +/* Port Tx Statistics Formats (408 bytes) */ +struct tx_port_stats { + __le64 tx_64b_frames; + __le64 tx_65b_127b_frames; + __le64 tx_128b_255b_frames; + __le64 tx_256b_511b_frames; + __le64 tx_512b_1023b_frames; + __le64 tx_1024b_1518_frames; + __le64 tx_good_vlan_frames; + __le64 tx_1519b_2047_frames; + __le64 tx_2048b_4095b_frames; + __le64 tx_4096b_9216b_frames; + __le64 tx_9217b_16383b_frames; + __le64 tx_good_frames; + __le64 tx_total_frames; + __le64 tx_ucast_frames; + __le64 tx_mcast_frames; + __le64 tx_bcast_frames; + __le64 tx_pause_frames; + __le64 tx_pfc_frames; + __le64 tx_jabber_frames; + __le64 tx_fcs_err_frames; + __le64 tx_control_frames; + __le64 tx_oversz_frames; + __le64 tx_single_dfrl_frames; + __le64 tx_multi_dfrl_frames; + __le64 tx_single_coll_frames; + __le64 tx_multi_coll_frames; + __le64 tx_late_coll_frames; + __le64 tx_excessive_coll_frames; + __le64 tx_frag_frames; + __le64 tx_err; + __le64 tx_tagged_frames; + __le64 tx_dbl_tagged_frames; + __le64 tx_runt_frames; + __le64 tx_fifo_underruns; + __le64 tx_pfc_ena_frames_pri0; + __le64 tx_pfc_ena_frames_pri1; + __le64 tx_pfc_ena_frames_pri2; + __le64 tx_pfc_ena_frames_pri3; + __le64 tx_pfc_ena_frames_pri4; + __le64 tx_pfc_ena_frames_pri5; + __le64 tx_pfc_ena_frames_pri6; + __le64 tx_pfc_ena_frames_pri7; + __le64 tx_eee_lpi_events; + __le64 tx_eee_lpi_duration; + __le64 tx_llfc_logical_msgs; + __le64 tx_hcfc_msgs; + __le64 tx_total_collisions; + __le64 tx_bytes; + __le64 tx_xthol_frames; + __le64 tx_stat_discard; + __le64 tx_stat_error; +}; + +/* Port Rx Statistics Formats (528 bytes) */ +struct rx_port_stats 
{ + __le64 rx_64b_frames; + __le64 rx_65b_127b_frames; + __le64 rx_128b_255b_frames; + __le64 rx_256b_511b_frames; + __le64 rx_512b_1023b_frames; + __le64 rx_1024b_1518_frames; + __le64 rx_good_vlan_frames; + __le64 rx_1519b_2047b_frames; + __le64 rx_2048b_4095b_frames; + __le64 rx_4096b_9216b_frames; + __le64 rx_9217b_16383b_frames; + __le64 rx_total_frames; + __le64 rx_ucast_frames; + __le64 rx_mcast_frames; + __le64 rx_bcast_frames; + __le64 rx_fcs_err_frames; + __le64 rx_ctrl_frames; + __le64 rx_pause_frames; + __le64 rx_pfc_frames; + __le64 rx_unsupported_opcode_frames; + __le64 rx_unsupported_da_pausepfc_frames; + __le64 rx_wrong_sa_frames; + __le64 rx_align_err_frames; + __le64 rx_oor_len_frames; + __le64 rx_code_err_frames; + __le64 rx_false_carrier_frames; + __le64 rx_ovrsz_frames; + __le64 rx_jbr_frames; + __le64 rx_mtu_err_frames; + __le64 rx_match_crc_frames; + __le64 rx_promiscuous_frames; + __le64 rx_tagged_frames; + __le64 rx_double_tagged_frames; + __le64 rx_trunc_frames; + __le64 rx_good_frames; + __le64 rx_pfc_xon2xoff_frames_pri0; + __le64 rx_pfc_xon2xoff_frames_pri1; + __le64 rx_pfc_xon2xoff_frames_pri2; + __le64 rx_pfc_xon2xoff_frames_pri3; + __le64 rx_pfc_xon2xoff_frames_pri4; + __le64 rx_pfc_xon2xoff_frames_pri5; + __le64 rx_pfc_xon2xoff_frames_pri6; + __le64 rx_pfc_xon2xoff_frames_pri7; + __le64 rx_pfc_ena_frames_pri0; + __le64 rx_pfc_ena_frames_pri1; + __le64 rx_pfc_ena_frames_pri2; + __le64 rx_pfc_ena_frames_pri3; + __le64 rx_pfc_ena_frames_pri4; + __le64 rx_pfc_ena_frames_pri5; + __le64 rx_pfc_ena_frames_pri6; + __le64 rx_pfc_ena_frames_pri7; + __le64 rx_sch_crc_err_frames; + __le64 rx_undrsz_frames; + __le64 rx_frag_frames; + __le64 rx_eee_lpi_events; + __le64 rx_eee_lpi_duration; + __le64 rx_llfc_physical_msgs; + __le64 rx_llfc_logical_msgs; + __le64 rx_llfc_msgs_with_crc_err; + __le64 rx_hcfc_msgs; + __le64 rx_hcfc_msgs_with_crc_err; + __le64 rx_bytes; + __le64 rx_runt_bytes; + __le64 rx_runt_frames; + __le64 rx_stat_discard; + __le64 rx_stat_err; +}; + +/* hwrm_ver_get */ +/* Input (24 bytes) */ +struct hwrm_ver_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 hwrm_intf_maj; + u8 hwrm_intf_min; + u8 hwrm_intf_upd; + u8 unused_0[5]; +}; + +/* Output (128 bytes) */ +struct hwrm_ver_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hwrm_intf_maj; + u8 hwrm_intf_min; + u8 hwrm_intf_upd; + u8 hwrm_intf_rsvd; + u8 hwrm_fw_maj; + u8 hwrm_fw_min; + u8 hwrm_fw_bld; + u8 hwrm_fw_rsvd; + u8 ape_fw_maj; + u8 ape_fw_min; + u8 ape_fw_bld; + u8 ape_fw_rsvd; + u8 kong_fw_maj; + u8 kong_fw_min; + u8 kong_fw_bld; + u8 kong_fw_rsvd; + u8 tang_fw_maj; + u8 tang_fw_min; + u8 tang_fw_bld; + u8 tang_fw_rsvd; + u8 bono_fw_maj; + u8 bono_fw_min; + u8 bono_fw_bld; + u8 bono_fw_rsvd; + char hwrm_fw_name[16]; + char ape_fw_name[16]; + char kong_fw_name[16]; + char tang_fw_name[16]; + char bono_fw_name[16]; + __le16 chip_num; + u8 chip_rev; + u8 chip_metal; + u8 chip_bond_id; + u8 unused_0; + __le16 max_req_win_len; + __le16 max_resp_len; + __le16 def_req_timeout; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_disable */ +/* Input (24 bytes) */ +struct hwrm_func_disable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID 0x1UL + __le16 vf_id; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_func_disable_output { + __le16 
error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_reset */ +/* Input (24 bytes) */ +struct hwrm_func_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL + __le16 vf_id; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_func_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_getfid */ +/* Input (24 bytes) */ +struct hwrm_func_getfid_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL + __le16 pci_id; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_func_getfid_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_func_vf_alloc */ +/* Input (24 bytes) */ +struct hwrm_func_vf_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vf_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 first_vf_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_func_vf_free */ +/* Input (24 bytes) */ +struct hwrm_func_vf_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vf_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_vf_cfg */ +/* Input (24 bytes) */ +struct hwrm_func_vf_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL + __le16 mtu; + __le16 guest_vlan; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vf_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_qcaps */ +/* Input (24 bytes) */ +struct hwrm_func_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 unused_0[3]; +}; + +/* Output (80 bytes) */ +struct hwrm_func_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le32 flags; + #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL + u8 perm_mac_address[6]; + __le16 max_rsscos_ctx; + __le16 max_cmpl_rings; + __le16 max_tx_rings; + __le16 max_rx_rings; + __le16 max_l2_ctxs; + __le16 max_vnics; + __le16 first_vf_id; + __le16 max_vfs; + __le16 max_stat_ctx; + __le32 
max_encap_records; + __le32 max_decap_records; + __le32 max_tx_em_flows; + __le32 max_tx_wm_flows; + __le32 max_rx_em_flows; + __le32 max_rx_wm_flows; + __le32 max_mcast_filters; + __le32 max_flow_id; + __le32 max_hw_ring_grps; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_func_cfg */ +/* Input (88 bytes) */ +struct hwrm_func_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0; + u8 unused_1; + __le32 flags; + #define FUNC_CFG_REQ_FLAGS_PROM_MODE 0x1UL + #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK 0x2UL + #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK 0x4UL + #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH 0x8UL + #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH 0x10UL + #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE 0x20UL + #define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL + #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL + #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL + __le32 enables; + #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL + #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL + #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL + #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL + #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL + #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL + #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL + #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL + #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL + #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL + #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL + #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL + #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL + #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL + #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL + #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL + #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL + #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL + __le16 mtu; + __le16 mru; + __le16 num_rsscos_ctxs; + __le16 num_cmpl_rings; + __le16 num_tx_rings; + __le16 num_rx_rings; + __le16 num_l2_ctxs; + __le16 num_vnics; + __le16 num_stat_ctxs; + __le16 num_hw_ring_grps; + u8 dflt_mac_addr[6]; + __le16 dflt_vlan; + __be32 dflt_ip_addr[4]; + __le32 min_bw; + __le32 max_bw; + __le16 async_event_cr; + u8 vlan_antispoof_mode; + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK (0x0UL << 0) + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN (0x1UL << 0) + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0) + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0) + u8 allowed_vlan_pris; + #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK (0x0UL << 0) + #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN (0x1UL << 0) + #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE (0x2UL << 0) + #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0) + u8 evb_mode; + #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0) + #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0) + #define FUNC_CFG_REQ_EVB_MODE_VEPA (0x2UL << 0) + u8 unused_2; + __le16 num_mcast_filters; +}; + +/* Output (16 bytes) */ +struct hwrm_func_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_qstats */ +/* Input (24 bytes) */ +struct hwrm_func_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; 
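hwrm_func_cfg_input follows the pattern used by most HWRM configuration requests: the enables bitmap declares which of the optional fields that follow the firmware should act on, and the rest are left untouched. A hypothetical fill helper that changes only a function's MTU and TX ring count (macro and field names from this header; target selection such as vf_id is omitted for brevity):

/* Illustrative only: select fields via the enables bitmap. */
static void demo_func_cfg_fill(struct hwrm_func_cfg_input *req,
			       u16 mtu, u16 num_tx_rings)
{
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
	req->mtu = cpu_to_le16(mtu);
	req->num_tx_rings = cpu_to_le16(num_tx_rings);
	/* all other fields are assumed to be ignored because their
	 * enable bits are clear */
}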
+ __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 unused_0[3]; +}; + +/* Output (176 bytes) */ +struct hwrm_func_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_err_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_err_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_clr_stats */ +/* Input (24 bytes) */ +struct hwrm_func_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_func_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_vf_resc_free */ +/* Input (24 bytes) */ +struct hwrm_func_vf_resc_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vf_resc_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_vf_vnic_ids_query */ +/* Input (32 bytes) */ +struct hwrm_func_vf_vnic_ids_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0; + u8 unused_1; + __le32 max_vnic_id_cnt; + __le64 vnic_id_tbl_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vf_vnic_ids_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id_cnt; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_func_drv_rgtr */ +/* Input (80 bytes) */ +struct hwrm_func_drv_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + __le32 enables; + #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL + #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL + #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL + __le16 os_type; + u8 ver_maj; + u8 ver_min; + u8 ver_upd; + u8 unused_0; + __le16 unused_1; + __le32 timestamp; + __le32 unused_2; + __le32 vf_req_fwd[8]; + __le32 async_event_fwd[8]; +}; + +/* Output (16 bytes) */ +struct hwrm_func_drv_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_drv_unrgtr */ +/* Input (24 bytes) */ +struct hwrm_func_drv_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct 
hwrm_func_drv_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_buf_rgtr */ +/* Input (128 bytes) */ +struct hwrm_func_buf_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL + #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL + __le16 vf_id; + __le16 req_buf_num_pages; + __le16 req_buf_page_size; + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B (0x4UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x16UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x17UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0) + __le16 req_buf_len; + __le16 resp_buf_len; + u8 unused_0; + u8 unused_1; + __le64 req_buf_page_addr0; + __le64 req_buf_page_addr1; + __le64 req_buf_page_addr2; + __le64 req_buf_page_addr3; + __le64 req_buf_page_addr4; + __le64 req_buf_page_addr5; + __le64 req_buf_page_addr6; + __le64 req_buf_page_addr7; + __le64 req_buf_page_addr8; + __le64 req_buf_page_addr9; + __le64 error_buf_addr; + __le64 resp_buf_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_func_buf_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_drv_qver */ +/* Input (24 bytes) */ +struct hwrm_func_drv_qver_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID 0x1UL + #define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID 0x2UL + __le16 fid; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_func_drv_qver_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 os_type; + u8 ver_maj; + u8 ver_min; + u8 ver_upd; + u8 unused_0; + u8 unused_1; + u8 valid; +}; + +/* hwrm_port_phy_cfg */ +/* Input (48 bytes) */ +struct hwrm_port_phy_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL + #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL + __le32 enables; + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL + #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL + #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL + #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL + __le16 port_id; + __le16 force_link_speed; + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB (0xaUL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB (0x14UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB (0x19UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB (0x64UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB (0xc8UL << 0) 
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0) + u8 auto_mode; + #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0) + u8 auto_duplex; + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH (0x2UL << 0) + u8 auto_pause; + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL + u8 unused_0; + __le16 auto_link_speed; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB (0xaUL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB (0x14UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB (0x19UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB (0x64UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB (0xc8UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0) + __le16 auto_link_speed_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL + u8 wirespeed; + #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0) + #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0) + u8 lpbk; + #define PORT_PHY_CFG_REQ_LPBK_NONE (0x0UL << 0) + #define PORT_PHY_CFG_REQ_LPBK_LOCAL (0x1UL << 0) + #define PORT_PHY_CFG_REQ_LPBK_REMOTE (0x2UL << 0) + u8 force_pause; + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL + u8 unused_1; + __le32 preemphasis; + __le32 unused_2; +}; + +/* Output (16 bytes) */ +struct hwrm_port_phy_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_phy_qcfg */ +/* Input (24 bytes) */ +struct hwrm_port_phy_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (48 bytes) */ +struct hwrm_port_phy_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + #define PORT_PHY_QCFG_RESP_LINK_NO_LINK (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_LINK (0x2UL << 0) + u8 unused_0; + __le16 link_speed; + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB (0xaUL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB (0x14UL << 0) + #define 
PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB (0x19UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB (0x64UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB (0xc8UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0) + u8 duplex; + #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0) + u8 pause; + #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL + __le16 support_speeds; + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL + __le16 force_link_speed; + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB (0x14UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB (0x19UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB (0x64UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB (0xc8UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0) + u8 auto_mode; + #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0) + u8 auto_pause; + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL + __le16 auto_link_speed; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB (0x14UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB (0x19UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB (0x64UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB (0xc8UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0) + __le16 auto_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define 
PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL + u8 wirespeed; + #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0) + u8 lpbk; + #define PORT_PHY_QCFG_RESP_LPBK_NONE (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_LPBK_LOCAL (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_LPBK_REMOTE (0x2UL << 0) + u8 force_pause; + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL + u8 duplex_setting; + #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL (0x1UL << 0) + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0) + u8 media_type; + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0) + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0) + u8 transceiver_type; + #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0) + u8 phy_addr; + #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0 + u8 unused_2; + __le16 link_partner_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL + u8 link_partner_adv_auto_mode; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0) + u8 link_partner_adv_pause; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL + u8 unused_3; + u8 unused_4; + u8 unused_5; + u8 valid; +}; + +/* hwrm_port_mac_cfg */ +/* Input (32 bytes) */ +struct hwrm_port_mac_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL + #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL + #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL + #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL + __le32 enables; + #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL + #define 
PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL + #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI 0x4UL + #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL + #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL + #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL + __le16 port_id; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_REQ_LPBK_NONE (0x0UL << 0) + #define PORT_MAC_CFG_REQ_LPBK_LOCAL (0x1UL << 0) + #define PORT_MAC_CFG_REQ_LPBK_REMOTE (0x2UL << 0) + u8 ivlan_pri2cos_map_pri; + u8 lcos_map_pri; + u8 tunnel_pri2cos_map_pri; + u8 dscp2pri_map_pri; +}; + +/* Output (16 bytes) */ +struct hwrm_port_mac_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + __le16 mtu; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_RESP_LPBK_NONE (0x0UL << 0) + #define PORT_MAC_CFG_RESP_LPBK_LOCAL (0x1UL << 0) + #define PORT_MAC_CFG_RESP_LPBK_REMOTE (0x2UL << 0) + u8 unused_0; + u8 valid; +}; + +/* hwrm_port_enable */ +/* Input (24 bytes) */ +struct hwrm_port_enable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC 0x1UL + __le16 port_id; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_port_enable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_qstats */ +/* Input (40 bytes) */ +struct hwrm_port_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0; + u8 unused_1; + u8 unused_2[3]; + u8 unused_3; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_port_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_lpbk_qstats */ +/* Input (16 bytes) */ +struct hwrm_port_lpbk_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (64 bytes) */ +struct hwrm_port_lpbk_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 lpbk_ucast_frames; + __le64 lpbk_mcast_frames; + __le64 lpbk_bcast_frames; + __le64 lpbk_ucast_bytes; + __le64 lpbk_mcast_bytes; + __le64 lpbk_bcast_bytes; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_clr_stats */ +/* Input (24 bytes) */ +struct hwrm_port_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_port_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_lpbk_clr_stats */ +/* Input (16 bytes) */ +struct hwrm_port_lpbk_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_port_lpbk_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_blink_led */ +/* Input (24 bytes) */ +struct hwrm_port_blink_led_input { + __le16 req_type; + 
__le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 num_blinks; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_port_blink_led_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_qportcfg */ +/* Input (24 bytes) */ +struct hwrm_queue_qportcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + __le16 port_id; + __le16 unused_0; +}; + +/* Output (32 bytes) */ +struct hwrm_queue_qportcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 max_configurable_queues; + u8 max_configurable_lossless_queues; + u8 queue_cfg_allowed; + u8 queue_buffers_cfg_allowed; + u8 queue_pfcenable_cfg_allowed; + u8 queue_pri2cos_cfg_allowed; + u8 queue_cos2bw_cfg_allowed; + u8 queue_id0; + u8 queue_id0_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id1; + u8 queue_id1_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id2; + u8 queue_id2_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id3; + u8 queue_id3_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id4; + u8 queue_id4_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id5; + u8 queue_id5_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id6; + u8 queue_id6_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 queue_id7; + u8 queue_id7_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 valid; +}; + +/* hwrm_queue_cfg */ +/* Input (40 bytes) */ +struct hwrm_queue_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define 
QUEUE_CFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + __le32 enables; + #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL + #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL + __le32 queue_id; + __le32 dflt_len; + u8 service_profile; + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY (0x0UL << 0) + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS (0x1UL << 0) + #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN (0xffUL << 0) + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_buffers_cfg */ +/* Input (56 bytes) */ +struct hwrm_queue_buffers_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + __le32 enables; + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP 0x4UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x8UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x10UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x20UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x40UL + #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x80UL + __le32 queue_id; + __le32 reserved; + __le32 shared; + __le32 xoff; + __le32 xon; + __le32 full; + __le32 notfull; + __le32 max; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_buffers_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_pfcenable_cfg */ +/* Input (24 bytes) */ +struct hwrm_queue_pfcenable_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED 0x80UL + __le16 port_id; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_pfcenable_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_pri2cos_cfg */ +/* Input (40 bytes) */ +struct hwrm_queue_pri2cos_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL + __le32 enables; + u8 port_id; + u8 pri0_cos; + u8 pri1_cos; + u8 pri2_cos; + u8 pri3_cos; + u8 pri4_cos; + u8 pri5_cos; + u8 pri6_cos; + u8 pri7_cos; + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct 
hwrm_queue_pri2cos_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_cos2bw_cfg */ +/* Input (128 bytes) */ +struct hwrm_queue_cos2bw_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL + __le16 port_id; + u8 queue_id0; + u8 unused_0; + __le32 queue_id0_min_bw; + __le32 queue_id0_max_bw; + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + u8 queue_id1; + __le32 queue_id1_min_bw; + __le32 queue_id1_max_bw; + u8 queue_id1_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id1_pri_lvl; + u8 queue_id1_bw_weight; + u8 queue_id2; + __le32 queue_id2_min_bw; + __le32 queue_id2_max_bw; + u8 queue_id2_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id2_pri_lvl; + u8 queue_id2_bw_weight; + u8 queue_id3; + __le32 queue_id3_min_bw; + __le32 queue_id3_max_bw; + u8 queue_id3_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id3_pri_lvl; + u8 queue_id3_bw_weight; + u8 queue_id4; + __le32 queue_id4_min_bw; + __le32 queue_id4_max_bw; + u8 queue_id4_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id4_pri_lvl; + u8 queue_id4_bw_weight; + u8 queue_id5; + __le32 queue_id5_min_bw; + __le32 queue_id5_max_bw; + u8 queue_id5_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id5_pri_lvl; + u8 queue_id5_bw_weight; + u8 queue_id6; + __le32 queue_id6_min_bw; + __le32 queue_id6_max_bw; + u8 queue_id6_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id6_pri_lvl; + u8 queue_id6_bw_weight; + u8 queue_id7; + __le32 queue_id7_min_bw; + __le32 queue_id7_max_bw; + u8 queue_id7_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + u8 queue_id7_pri_lvl; + u8 queue_id7_bw_weight; + u8 unused_1[5]; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_cos2bw_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_alloc */ +/* Input (24 bytes) */ +struct hwrm_vnic_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_vnic_free */ +/* Input (24 bytes) */ +struct hwrm_vnic_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_cfg */ +/* Input (40 bytes) */ +struct hwrm_vnic_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL + #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL + __le32 enables; + #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL + #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL + #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL + #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL + #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL + __le16 vnic_id; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; + __le16 lb_rule; + __le16 mru; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_tpa_cfg */ +/* Input (40 bytes) */ +struct hwrm_vnic_tpa_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL + #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + 
#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL + __le32 enables; + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL + #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL + __le16 vnic_id; + __le16 max_agg_segs; + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 (0x0UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 (0x1UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 (0x2UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 (0x3UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX (0x1fUL << 0) + __le16 max_aggs; + #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 (0x0UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 (0x1UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 (0x2UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 (0x3UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 (0x4UL << 0) + #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX (0x7UL << 0) + u8 unused_0; + u8 unused_1; + __le32 max_agg_timer; + __le32 min_agg_len; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_tpa_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_rss_cfg */ +/* Input (48 bytes) */ +struct hwrm_vnic_rss_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 hash_type; + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL + __le32 unused_0; + __le64 ring_grp_tbl_addr; + __le64 hash_key_tbl_addr; + __le16 rss_ctx_idx; + __le16 unused_1[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_rss_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_cfg */ +/* Input (40 bytes) */ +struct hwrm_vnic_plcmodes_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL + __le32 enables; + #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL + __le32 vnic_id; + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_plcmodes_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc */ +/* Input (16 bytes) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 
seq_id; + __le16 resp_len; + __le16 rss_cos_lb_ctx_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free */ +/* Input (24 bytes) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_cos_lb_ctx_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_ring_alloc */ +/* Input (80 bytes) */ +struct hwrm_ring_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID 0x1UL + #define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID 0x2UL + #define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID 0x4UL + #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL + #define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID 0x10UL + #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + u8 ring_type; + #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0) + #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0) + #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0) + #define RING_ALLOC_REQ_RING_TYPE_STATUS (0x3UL << 0) + #define RING_ALLOC_REQ_RING_TYPE_CMD (0x4UL << 0) + u8 unused_0; + __le16 unused_1; + __le64 page_tbl_addr; + __le32 fbo; + u8 page_size; + u8 page_tbl_depth; + u8 unused_2; + u8 unused_3; + __le32 length; + __le16 logical_id; + __le16 cmpl_ring_id; + __le16 queue_id; + u8 unused_4; + u8 unused_5; + __le32 arb_grp_id; + __le16 input_number; + u8 unused_6; + u8 unused_7; + __le32 weight; + __le32 stat_ctx_id; + __le32 min_bw; + __le32 max_bw; + u8 int_mode; + #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0) + #define RING_ALLOC_REQ_INT_MODE_MSI (0x1UL << 0) + #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0) + #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0) + u8 unused_8[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ring_id; + __le16 logical_ring_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_ring_free */ +/* Input (24 bytes) */ +struct hwrm_ring_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0) + #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0) + #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0) + #define RING_FREE_REQ_RING_TYPE_STATUS (0x3UL << 0) + #define RING_FREE_REQ_RING_TYPE_CMD (0x4UL << 0) + u8 unused_0; + __le16 ring_id; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params */ +/* Input (24 bytes) */ +struct hwrm_ring_cmpl_ring_qaggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + __le16 unused_0[3]; +}; + +/* Output (32 bytes) */ +struct hwrm_ring_cmpl_ring_qaggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL + #define 
RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params */ +/* Input (40 bytes) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + __le16 flags; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_ring_reset */ +/* Input (24 bytes) */ +struct hwrm_ring_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0) + #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0) + #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0) + #define RING_RESET_REQ_RING_TYPE_STATUS (0x3UL << 0) + #define RING_RESET_REQ_RING_TYPE_CMD (0x4UL << 0) + u8 unused_0; + __le16 ring_id; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_ring_grp_alloc */ +/* Input (24 bytes) */ +struct hwrm_ring_grp_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 cr; + __le16 rr; + __le16 ar; + __le16 sc; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_grp_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 ring_group_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_ring_grp_free */ +/* Input (24 bytes) */ +struct hwrm_ring_grp_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 ring_group_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_ring_grp_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_arb_grp_alloc */ +/* Input (24 bytes) */ +struct hwrm_arb_grp_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 input_number; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_arb_grp_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 arb_grp_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_arb_grp_cfg */ +/* Input (32 bytes) */ +struct hwrm_arb_grp_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 arb_grp_id; + 
__le16 input_number; + __le16 tx_ring; + __le32 weight; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_arb_grp_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_alloc */ +/* Input (96 bytes) */ +struct hwrm_cfa_l2_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL + __le32 enables; + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x8000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + u8 l2_addr[6]; + u8 unused_0; + u8 unused_1; + u8 l2_addr_mask[6]; + __le16 l2_ovlan; + __le16 l2_ovlan_mask; + __le16 l2_ivlan; + __le16 l2_ivlan_mask; + u8 unused_2; + u8 unused_3; + u8 t_l2_addr[6]; + u8 unused_4; + u8 unused_5; + u8 t_l2_addr_mask[6]; + __le16 t_l2_ovlan; + __le16 t_l2_ovlan_mask; + __le16 t_l2_ivlan; + __le16 t_l2_ivlan_mask; + u8 src_type; + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT (0x0UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF (0x1UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF (0x2UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC (0x3UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG (0x4UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE (0x5UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO (0x6UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG (0x7UL << 0) + u8 unused_6; + __le32 src_id; + u8 tunnel_type; + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 unused_7; + __le16 dst_vnic_id; + __le16 mirror_vnic_id; + u8 pri_hint; + #define 
CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER (0x1UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER (0x2UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX (0x3UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN (0x4UL << 0) + u8 unused_8; + __le32 unused_9; + __le64 l2_filter_id_hint; +}; + +/* Output (24 bytes) */ +struct hwrm_cfa_l2_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 l2_filter_id; + __le32 flow_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_l2_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 l2_filter_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_l2_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_cfg */ +/* Input (40 bytes) */ +struct hwrm_cfa_l2_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + __le32 enables; + #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID 0x1UL + __le64 l2_filter_id; + __le32 dst_vnic_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_l2_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask */ +/* Input (40 bytes) */ +struct hwrm_cfa_l2_set_rx_mask_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 dflt_vnic_id; + __le32 mask; + #define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST 0x1UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL + __le64 mc_tbl_addr; + __le32 num_mc_entries; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_l2_set_rx_mask_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_l2_set_bcastmcast_mirroring */ +/* Input (32 bytes) */ +struct hwrm_cfa_l2_set_bcastmcast_mirroring_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 dflt_vnic_id; + __le32 mirroring_flags; + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL + __le16 vlan_id; + u8 bcast_domain; + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0) + #define 
CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0) + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0) + u8 mcast_domain; + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0) + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0) + #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0) + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_l2_set_bcastmcast_mirroring_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_alloc */ +/* Input (88 bytes) */ +struct hwrm_cfa_tunnel_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + __le32 enables; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL + __le64 l2_filter_id; + u8 l2_addr[6]; + __le16 l2_ivlan; + __le32 l3_addr[4]; + __le32 t_l3_addr[4]; + u8 l3_addr_type; + u8 t_l3_addr_type; + u8 tunnel_type; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 unused_0; + __le32 vni; + __le32 dst_vnic_id; + __le32 mirror_vnic_id; +}; + +/* Output (24 bytes) */ +struct hwrm_cfa_tunnel_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tunnel_filter_id; + __le32 flow_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_tunnel_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 tunnel_filter_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_tunnel_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_encap_record_alloc */ +/* Input (32 bytes) */ +struct hwrm_cfa_encap_record_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define 
CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + u8 encap_type; + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN (0x1UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE (0x2UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE (0x3UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP (0x4UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE (0x5UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS (0x6UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN (0x7UL << 0) + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE (0x8UL << 0) + u8 unused_0; + __le16 unused_1; + __le32 encap_data[16]; +}; + +/* Output (24 bytes) */ +struct hwrm_cfa_encap_record_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 encap_record_id; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_encap_record_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_encap_record_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 encap_record_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_encap_record_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_alloc */ +/* Input (128 bytes) */ +struct hwrm_cfa_ntuple_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + __le32 enables; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x10000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL + __le64 l2_filter_id; + u8 src_macaddr[6]; + __be16 ethertype; + u8 ipaddr_type; + u8 ip_protocol; + __le16 dst_vnic_id; + __le16 mirror_vnic_id; + u8 tunnel_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 
(0x6UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 pri_hint; + __be32 src_ipaddr[4]; + __be32 src_ipaddr_mask[4]; + __be32 dst_ipaddr[4]; + __be32 dst_ipaddr_mask[4]; + __be16 src_port; + __be16 src_port_mask; + __be16 dst_port; + __be16 dst_port_mask; + __le64 ntuple_filter_id_hint; +}; + +/* Output (24 bytes) */ +struct hwrm_cfa_ntuple_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 ntuple_filter_id; + __le32 flow_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_ntuple_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 ntuple_filter_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_ntuple_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_cfg */ +/* Input (40 bytes) */ +struct hwrm_cfa_ntuple_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL + __le32 unused_0; + __le64 ntuple_filter_id; + __le32 new_dst_vnic_id; + __le32 new_mirror_vnic_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_ntuple_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_query */ +/* Input (24 bytes) */ +struct hwrm_tunnel_dst_port_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_tunnel_dst_port_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + __be16 tunnel_dst_port_val; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_alloc */ +/* Input (24 bytes) */ +struct hwrm_tunnel_dst_port_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + 
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 unused_0; + __be16 tunnel_dst_port_val; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_tunnel_dst_port_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_free */ +/* Input (24 bytes) */ +struct hwrm_tunnel_dst_port_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT (0x7UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0) + u8 unused_0; + __le16 tunnel_dst_port_id; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_tunnel_dst_port_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_stat_ctx_alloc */ +/* Input (32 bytes) */ +struct hwrm_stat_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 stats_dma_addr; + __le32 update_period_ms; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_stat_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_stat_ctx_free */ +/* Input (24 bytes) */ +struct hwrm_stat_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_stat_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_stat_ctx_query */ +/* Input (24 bytes) */ +struct hwrm_stat_ctx_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + __le32 unused_0; +}; + +/* Output (176 bytes) */ +struct hwrm_stat_ctx_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_err_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_err_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + 
__le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_stat_ctx_clr_stats */ +/* Input (24 bytes) */ +struct hwrm_stat_ctx_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_stat_ctx_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_mgmt_l2_filter_alloc */ +/* Input (56 bytes) */ +struct hwrm_mgmt_l2_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL + #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0) + #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0) + __le32 enables; + #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS 0x1UL + #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN 0x2UL + #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN 0x4UL + #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID 0x8UL + u8 l2_address[6]; + u8 unused_0; + u8 unused_1; + u8 l2_address_mask[6]; + __le16 ovlan; + __le16 ovlan_mask; + __le16 ivlan; + __le16 ivlan_mask; + u8 unused_2; + u8 unused_3; + __le32 action_id; + u8 action_bypass; + #define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS 0x1UL + u8 unused_5[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_mgmt_l2_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mgmt_l2_filter_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_mgmt_l2_filter_free */ +/* Input (24 bytes) */ +struct hwrm_mgmt_l2_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 mgmt_l2_filter_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_mgmt_l2_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_raw_write_blk */ +/* Input (32 bytes) */ +struct hwrm_nvm_raw_write_blk_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le32 dest_addr; + __le32 len; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_raw_write_blk_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_read */ +/* Input (40 bytes) */ +struct hwrm_nvm_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 dir_idx; + u8 unused_0; + u8 unused_1; + __le32 offset; + __le32 len; + __le32 unused_2; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_raw_dump */ +/* Input (32 bytes) */ +struct hwrm_nvm_raw_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + 
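All of the HWRM structures above share one framing convention: every request starts with req_type/cmpl_ring/seq_id/target_id/resp_addr, and every response ends with a `valid` byte the host can check to know firmware has finished writing the reply. A minimal sketch of how a caller would drive one of them, assuming the bnxt_hwrm_cmd_hdr_init()/_hwrm_send_message() helpers and hwrm_cmd_lock behave as they are used in bnxt_sriov.c later in this patch, and that HWRM_STAT_CTX_ALLOC is defined with the other request ids earlier in bnxt_hsi.h; the helper name and the 1000 ms update period below are illustrative, not part of the patch:

static int bnxt_stat_ctx_alloc_sketch(struct bnxt *bp, dma_addr_t stats_map,
				      u32 *ctx_id)
{
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.stats_dma_addr = cpu_to_le64(stats_map);	/* DMA address of the stats block */
	req.update_period_ms = cpu_to_le32(1000);	/* assumed refresh period */

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*ctx_id = le32_to_cpu(resp->stat_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}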
__le32 offset; + __le32 len; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_raw_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_get_dir_entries */ +/* Input (24 bytes) */ +struct hwrm_nvm_get_dir_entries_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_get_dir_entries_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_get_dir_info */ +/* Input (16 bytes) */ +struct hwrm_nvm_get_dir_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (24 bytes) */ +struct hwrm_nvm_get_dir_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 entries; + __le32 entry_length; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_write */ +/* Input (40 bytes) */ +struct hwrm_nvm_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 dir_data_length; + __le16 option; + __le16 flags; + #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_modify */ +/* Input (40 bytes) */ +struct hwrm_nvm_modify_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_idx; + u8 unused_0; + u8 unused_1; + __le32 offset; + __le32 len; + __le32 unused_2; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_modify_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_find_dir_entry */ +/* Input (32 bytes) */ +struct hwrm_nvm_find_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL + __le16 dir_idx; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 opt_ordinal; + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0 + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ (0x0UL << 0) + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE (0x1UL << 0) + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT (0x2UL << 0) + u8 unused_1[3]; +}; + +/* Output (32 bytes) */ +struct hwrm_nvm_find_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le32 dir_data_length; + __le32 fw_ver; + __le16 dir_ordinal; + __le16 dir_idx; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_erase_dir_entry */ +/* Input (24 bytes) */ +struct hwrm_nvm_erase_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_idx; + __le16 unused_0[3]; +}; + +/* Output (16 
bytes) */ +struct hwrm_nvm_erase_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_get_dev_info */ +/* Input (16 bytes) */ +struct hwrm_nvm_get_dev_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (32 bytes) */ +struct hwrm_nvm_get_dev_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 manufacturer_id; + __le16 device_id; + __le32 sector_size; + __le32 nvram_size; + __le32 reserved_size; + __le32 available_size; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_nvm_mod_dir_entry */ +/* Input (32 bytes) */ +struct hwrm_nvm_mod_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL + __le16 dir_idx; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 checksum; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_mod_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_nvm_verify_update */ +/* Input (24 bytes) */ +struct hwrm_nvm_verify_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_verify_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_exec_fwd_resp */ +/* Input (120 bytes) */ +struct hwrm_exec_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[24]; + __le16 encap_resp_target_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_exec_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_reject_fwd_resp */ +/* Input (120 bytes) */ +struct hwrm_reject_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[24]; + __le16 encap_resp_target_id; + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_reject_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_fwd_resp */ +/* Input (40 bytes) */ +struct hwrm_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_resp_target_id; + __le16 encap_resp_cmpl_ring; + __le16 encap_resp_len; + u8 unused_0; + u8 unused_1; + __le64 encap_resp_addr; + __le32 encap_resp[24]; +}; + +/* Output (16 bytes) */ +struct hwrm_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_fwd_async_event_cmpl */ +/* Input (32 bytes) */ +struct hwrm_fwd_async_event_cmpl_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 
resp_addr; + __le16 encap_async_event_target_id; + u8 unused_0; + u8 unused_1; + u8 unused_2[3]; + u8 unused_3; + __le32 encap_async_event_cmpl[4]; +}; + +/* Output (16 bytes) */ +struct hwrm_fwd_async_event_cmpl_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_fw_reset */ +/* Input (24 bytes) */ +struct hwrm_fw_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0) + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0) + u8 selfrst_status; + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) + __le16 unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_fw_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) + u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_fw_qstatus */ +/* Input (24 bytes) */ +struct hwrm_fw_qstatus_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0) + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_fw_qstatus_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) + u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_temp_monitor_query */ +/* Input (16 bytes) */ +struct hwrm_temp_monitor_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* Output (16 bytes) */ +struct hwrm_temp_monitor_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 temp; + u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h new file mode 100644 index 000000000000..3cf3e1b70b64 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -0,0 +1,59 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef _BNXT_NVM_DEFS_H_ +#define _BNXT_NVM_DEFS_H_ + +enum bnxt_nvm_directory_type { + BNX_DIR_TYPE_UNUSED = 0, + BNX_DIR_TYPE_PKG_LOG = 1, + BNX_DIR_TYPE_CHIMP_PATCH = 3, + BNX_DIR_TYPE_BOOTCODE = 4, + BNX_DIR_TYPE_VPD = 5, + BNX_DIR_TYPE_EXP_ROM_MBA = 6, + BNX_DIR_TYPE_AVS = 7, + BNX_DIR_TYPE_PCIE = 8, + BNX_DIR_TYPE_PORT_MACRO = 9, + BNX_DIR_TYPE_APE_FW = 10, + BNX_DIR_TYPE_APE_PATCH = 11, + BNX_DIR_TYPE_KONG_FW = 12, + BNX_DIR_TYPE_KONG_PATCH = 13, + BNX_DIR_TYPE_BONO_FW = 14, + BNX_DIR_TYPE_BONO_PATCH = 15, + BNX_DIR_TYPE_TANG_FW = 16, + BNX_DIR_TYPE_TANG_PATCH = 17, + BNX_DIR_TYPE_BOOTCODE_2 = 18, + BNX_DIR_TYPE_CCM = 19, + BNX_DIR_TYPE_PCI_CFG = 20, + BNX_DIR_TYPE_TSCF_UCODE = 21, + BNX_DIR_TYPE_ISCSI_BOOT = 22, + BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24, + BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25, + BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26, + BNX_DIR_TYPE_EXT_PHY = 27, + BNX_DIR_TYPE_SHARED_CFG = 40, + BNX_DIR_TYPE_PORT_CFG = 41, + BNX_DIR_TYPE_FUNC_CFG = 42, + BNX_DIR_TYPE_MGMT_CFG = 48, + BNX_DIR_TYPE_MGMT_DATA = 49, + BNX_DIR_TYPE_MGMT_WEB_DATA = 50, + BNX_DIR_TYPE_MGMT_WEB_META = 51, + BNX_DIR_TYPE_MGMT_EVENT_LOG = 52, + BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53 +}; + +#define BNX_DIR_ORDINAL_FIRST 0 + +#define BNX_DIR_EXT_INACTIVE (1 << 0) +#define BNX_DIR_EXT_UPDATE (1 << 1) + +#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) +#define BNX_DIR_ATTR_PROP_STREAM (1 << 1) + +#endif /* Don't add anything after this line */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c new file mode 100644 index 000000000000..60989e7e266a --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -0,0 +1,816 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_sriov.h" +#include "bnxt_ethtool.h" + +#ifdef CONFIG_BNXT_SRIOV +static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) +{ + if (bp->state != BNXT_STATE_OPEN) { + netdev_err(bp->dev, "vf ndo called though PF is down\n"); + return -EINVAL; + } + if (!bp->pf.active_vfs) { + netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); + return -EINVAL; + } + if (vf_id >= bp->pf.max_vfs) { + netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); + return -EINVAL; + } + return 0; +} + +int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) +{ + struct hwrm_func_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + bool old_setting = false; + u32 func_flags; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + + vf = &bp->pf.vf[vf_id]; + if (vf->flags & BNXT_VF_SPOOFCHK) + old_setting = true; + if (old_setting == setting) + return 0; + + func_flags = vf->func_flags; + if (setting) + func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK; + else + func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK; + /*TODO: if the driver supports VLAN filter on guest VLAN, + * the spoof check should also include vlan anti-spoofing + */ + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.vf_id = cpu_to_le16(vf->fw_fid); + req.flags = cpu_to_le32(func_flags); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + vf->func_flags = func_flags; + if (setting) + vf->flags |= BNXT_VF_SPOOFCHK; + else + vf->flags &= ~BNXT_VF_SPOOFCHK; + } + return rc; +} + +int bnxt_get_vf_config(struct net_device *dev, int vf_id, + struct ifla_vf_info *ivi) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + + ivi->vf = vf_id; + vf = &bp->pf.vf[vf_id]; + + memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN); + ivi->max_tx_rate = vf->max_tx_rate; + ivi->min_tx_rate = vf->min_tx_rate; + ivi->vlan = vf->vlan; + ivi->qos = vf->flags & BNXT_VF_QOS; + ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK; + if (!(vf->flags & BNXT_VF_LINK_FORCED)) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf->flags & BNXT_VF_LINK_UP) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + + return 0; +} + +int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) +{ + struct hwrm_func_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + /* reject bc or mc mac addr, zero mac addr means allow + * VF to use its own mac addr + */ + if (is_multicast_ether_addr(mac)) { + netdev_err(dev, "Invalid VF ethernet address\n"); + return -EINVAL; + } + vf = &bp->pf.vf[vf_id]; + + memcpy(vf->mac_addr, mac, ETH_ALEN); + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.vf_id = cpu_to_le16(vf->fw_fid); + req.flags = cpu_to_le32(vf->func_flags); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, mac, ETH_ALEN); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos) +{ + struct hwrm_func_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + 
struct bnxt_vf_info *vf; + u16 vlan_tag; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + + /* TODO: needed to implement proper handling of user priority, + * currently fail the command if there is valid priority + */ + if (vlan_id > 4095 || qos) + return -EINVAL; + + vf = &bp->pf.vf[vf_id]; + vlan_tag = vlan_id; + if (vlan_tag == vf->vlan) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.vf_id = cpu_to_le16(vf->fw_fid); + req.flags = cpu_to_le32(vf->func_flags); + req.dflt_vlan = cpu_to_le16(vlan_tag); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + vf->vlan = vlan_tag; + return rc; +} + +int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, + int max_tx_rate) +{ + struct hwrm_func_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + u32 pf_link_speed; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + + vf = &bp->pf.vf[vf_id]; + pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); + if (max_tx_rate > pf_link_speed) { + netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n", + max_tx_rate, vf_id); + return -EINVAL; + } + + if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) { + netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", + min_tx_rate, vf_id); + return -EINVAL; + } + if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.vf_id = cpu_to_le16(vf->fw_fid); + req.flags = cpu_to_le32(vf->func_flags); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); + req.max_bw = cpu_to_le32(max_tx_rate); + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); + req.min_bw = cpu_to_le32(min_tx_rate); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + vf->min_tx_rate = min_tx_rate; + vf->max_tx_rate = max_tx_rate; + } + return rc; +} + +int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + int rc; + + rc = bnxt_vf_ndo_prep(bp, vf_id); + if (rc) + return rc; + + vf = &bp->pf.vf[vf_id]; + + vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED); + switch (link) { + case IFLA_VF_LINK_STATE_AUTO: + vf->flags |= BNXT_VF_LINK_UP; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->flags |= BNXT_VF_LINK_FORCED; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED; + break; + default: + netdev_err(bp->dev, "Invalid link option\n"); + rc = -EINVAL; + break; + } + /* CHIMP TODO: send msg to VF to update new link state */ + + return rc; +} + +static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) +{ + int i; + struct bnxt_vf_info *vf; + + for (i = 0; i < num_vfs; i++) { + vf = &bp->pf.vf[i]; + memset(vf, 0, sizeof(*vf)); + vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP; + } + return 0; +} + +static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp) +{ + int i, rc = 0; + struct bnxt_pf_info *pf = &bp->pf; + struct hwrm_func_vf_resc_free_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) { + req.vf_id = cpu_to_le16(i); + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + } + mutex_unlock(&bp->hwrm_cmd_lock); + 
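The VF ndo handlers defined above (bnxt_get_vf_config, bnxt_set_vf_mac, bnxt_set_vf_vlan, bnxt_set_vf_bw, bnxt_set_vf_spoofchk, bnxt_set_vf_link_state) are the standard SR-IOV net_device_ops callbacks; they are presumably wired into the driver's net_device_ops table in bnxt.c, which is outside this hunk. A hedged sketch of that wiring, using the stock field names from include/linux/netdevice.h:

static const struct net_device_ops bnxt_netdev_ops_sketch = {
	/* ordinary open/stop/xmit/ioctl hooks omitted */
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,	/* min/max tx rate */
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
};

bnxt_sriov_configure(), by contrast, plugs into the PCI driver's .sriov_configure hook (struct pci_driver) rather than net_device_ops, since it is invoked from the sriov_numvfs sysfs attribute.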
return rc; +} + +static void bnxt_free_vf_resources(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + int i; + + kfree(bp->pf.vf_event_bmap); + bp->pf.vf_event_bmap = NULL; + + for (i = 0; i < 4; i++) { + if (bp->pf.hwrm_cmd_req_addr[i]) { + dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE, + bp->pf.hwrm_cmd_req_addr[i], + bp->pf.hwrm_cmd_req_dma_addr[i]); + bp->pf.hwrm_cmd_req_addr[i] = NULL; + } + } + + kfree(bp->pf.vf); + bp->pf.vf = NULL; +} + +static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) +{ + struct pci_dev *pdev = bp->pdev; + u32 nr_pages, size, i, j, k = 0; + + bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); + if (!bp->pf.vf) + return -ENOMEM; + + bnxt_set_vf_attr(bp, num_vfs); + + size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE; + nr_pages = size / BNXT_PAGE_SIZE; + if (size & (BNXT_PAGE_SIZE - 1)) + nr_pages++; + + for (i = 0; i < nr_pages; i++) { + bp->pf.hwrm_cmd_req_addr[i] = + dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE, + &bp->pf.hwrm_cmd_req_dma_addr[i], + GFP_KERNEL); + + if (!bp->pf.hwrm_cmd_req_addr[i]) + return -ENOMEM; + + for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) { + struct bnxt_vf_info *vf = &bp->pf.vf[k]; + + vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] + + j * BNXT_HWRM_REQ_MAX_SIZE; + vf->hwrm_cmd_req_dma_addr = + bp->pf.hwrm_cmd_req_dma_addr[i] + j * + BNXT_HWRM_REQ_MAX_SIZE; + k++; + } + } + + /* Max 128 VF's */ + bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL); + if (!bp->pf.vf_event_bmap) + return -ENOMEM; + + bp->pf.hwrm_cmd_req_pages = nr_pages; + return 0; +} + +static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) +{ + struct hwrm_func_buf_rgtr_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); + + req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); + req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); + req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); + req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); + req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); + req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); + req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +/* only call by PF to reserve resources for VF */ +static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) +{ + u32 rc = 0, mtu, i; + u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; + struct hwrm_func_cfg_input req = {0}; + struct bnxt_pf_info *pf = &bp->pf; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + + /* Remaining rings are distributed equally amongs VF's for now */ + /* TODO: the following workaroud is needed to restrict total number + * of vf_cp_rings not exceed number of HW ring groups. This WA should + * be removed once new HWRM provides HW ring groups capability in + * hwrm_func_qcap. 
+ */ + vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs); + vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs; + /* TODO: restore this logic below once the WA above is removed */ + /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */ + vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) / + *num_vfs; + else + vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) / + *num_vfs; + vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs; + + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | + FUNC_CFG_REQ_ENABLES_MRU | + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_VNICS); + + mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + req.mru = cpu_to_le16(mtu); + req.mtu = cpu_to_le16(mtu); + + req.num_rsscos_ctxs = cpu_to_le16(1); + req.num_cmpl_rings = cpu_to_le16(vf_cp_rings); + req.num_tx_rings = cpu_to_le16(vf_tx_rings); + req.num_rx_rings = cpu_to_le16(vf_rx_rings); + req.num_l2_ctxs = cpu_to_le16(4); + vf_vnics = 1; + + req.num_vnics = cpu_to_le16(vf_vnics); + /* FIXME spec currently uses 1 bit for stats ctx */ + req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx); + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < *num_vfs; i++) { + req.vf_id = cpu_to_le16(pf->first_vf_id + i); + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + bp->pf.active_vfs = i + 1; + bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id); + } + mutex_unlock(&bp->hwrm_cmd_lock); + if (!rc) { + bp->pf.max_pf_tx_rings = bp->tx_nr_rings; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2; + else + bp->pf.max_pf_rx_rings = bp->rx_nr_rings; + } + return rc; +} + +static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) +{ + int rc = 0, vfs_supported; + int min_rx_rings, min_tx_rings, min_rss_ctxs; + int tx_ok = 0, rx_ok = 0, rss_ok = 0; + + /* Check if we can enable requested num of vf's. At a mininum + * we require 1 RX 1 TX rings for each VF. In this minimum conf + * features like TPA will not be available. 
+ */ + vfs_supported = *num_vfs; + + while (vfs_supported) { + min_rx_rings = vfs_supported; + min_tx_rings = vfs_supported; + min_rss_ctxs = vfs_supported; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >= + min_rx_rings) + rx_ok = 1; + } else { + if (bp->pf.max_rx_rings - bp->rx_nr_rings >= + min_rx_rings) + rx_ok = 1; + } + + if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) + tx_ok = 1; + + if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) + rss_ok = 1; + + if (tx_ok && rx_ok && rss_ok) + break; + + vfs_supported--; + } + + if (!vfs_supported) { + netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n"); + return -EINVAL; + } + + if (vfs_supported != *num_vfs) { + netdev_info(bp->dev, "Requested VFs %d, can enable %d\n", + *num_vfs, vfs_supported); + *num_vfs = vfs_supported; + } + + rc = bnxt_alloc_vf_resources(bp, *num_vfs); + if (rc) + goto err_out1; + + /* Reserve resources for VFs */ + rc = bnxt_hwrm_func_cfg(bp, num_vfs); + if (rc) + goto err_out2; + + /* Register buffers for VFs */ + rc = bnxt_hwrm_func_buf_rgtr(bp); + if (rc) + goto err_out2; + + rc = pci_enable_sriov(bp->pdev, *num_vfs); + if (rc) + goto err_out2; + + return 0; + +err_out2: + /* Free the resources reserved for various VF's */ + bnxt_hwrm_func_vf_resource_free(bp); + +err_out1: + bnxt_free_vf_resources(bp); + + return rc; +} + +void bnxt_sriov_disable(struct bnxt *bp) +{ + if (!bp->pf.active_vfs) + return; + + pci_disable_sriov(bp->pdev); + + /* Free the resources reserved for various VF's */ + bnxt_hwrm_func_vf_resource_free(bp); + + bnxt_free_vf_resources(bp); + + bp->pf.active_vfs = 0; + bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings; + bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings; +} + +int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(dev); + + if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { + netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n"); + return 0; + } + + rtnl_lock(); + if (!netif_running(dev)) { + netdev_warn(dev, "Reject SRIOV config request since if is down!\n"); + rtnl_unlock(); + return 0; + } + bp->sriov_cfg = true; + rtnl_unlock(); + if (!num_vfs) { + bnxt_sriov_disable(bp); + return 0; + } + + /* Check if enabled VFs is same as requested */ + if (num_vfs == bp->pf.active_vfs) + return 0; + + bnxt_sriov_enable(bp, &num_vfs); + + bp->sriov_cfg = false; + wake_up(&bp->sriov_cfg_wait); + + return num_vfs; +} + +static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, + void *encap_resp, __le64 encap_resp_addr, + __le16 encap_resp_cpr, u32 msg_size) +{ + int rc = 0; + struct hwrm_fwd_resp_input req = {0}; + struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); + + /* Set the new target id */ + req.target_id = cpu_to_le16(vf->fw_fid); + req.encap_resp_len = cpu_to_le16(msg_size); + req.encap_resp_addr = encap_resp_addr; + req.encap_resp_cmpl_ring = encap_resp_cpr; + memcpy(req.encap_resp, encap_resp, msg_size); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + if (rc) { + netdev_err(bp->dev, "hwrm_fwd_resp failed. 
rc:%d\n", rc); + goto fwd_resp_exit; + } + + if (resp->error_code) { + netdev_err(bp->dev, "hwrm_fwd_resp error %d\n", + resp->error_code); + rc = -1; + } + +fwd_resp_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, + u32 msg_size) +{ + int rc = 0; + struct hwrm_reject_fwd_resp_input req = {0}; + struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); + /* Set the new target id */ + req.target_id = cpu_to_le16(vf->fw_fid); + memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + if (rc) { + netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); + goto fwd_err_resp_exit; + } + + if (resp->error_code) { + netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n", + resp->error_code); + rc = -1; + } + +fwd_err_resp_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, + u32 msg_size) +{ + int rc = 0; + struct hwrm_exec_fwd_resp_input req = {0}; + struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); + /* Set the new target id */ + req.target_id = cpu_to_le16(vf->fw_fid); + memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + if (rc) { + netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc); + goto exec_fwd_resp_exit; + } + + if (resp->error_code) { + netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n", + resp->error_code); + rc = -1; + } + +exec_fwd_resp_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input); + struct hwrm_cfa_l2_filter_alloc_input *req = + (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; + + if (!is_valid_ether_addr(vf->mac_addr) || + ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) + return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); + else + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); +} + +static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + int rc = 0; + + if (!(vf->flags & BNXT_VF_LINK_FORCED)) { + /* real link */ + rc = bnxt_hwrm_exec_fwd_resp( + bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); + } else { + struct hwrm_port_phy_qcfg_output phy_qcfg_resp; + struct hwrm_port_phy_qcfg_input *phy_qcfg_req; + + phy_qcfg_req = + (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr; + mutex_lock(&bp->hwrm_cmd_lock); + memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, + sizeof(phy_qcfg_resp)); + mutex_unlock(&bp->hwrm_cmd_lock); + phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; + + if (vf->flags & BNXT_VF_LINK_UP) { + /* if physical link is down, force link up on VF */ + if (phy_qcfg_resp.link == + PORT_PHY_QCFG_RESP_LINK_NO_LINK) { + phy_qcfg_resp.link = + PORT_PHY_QCFG_RESP_LINK_LINK; + if (phy_qcfg_resp.auto_link_speed) + phy_qcfg_resp.link_speed = + phy_qcfg_resp.auto_link_speed; + else + phy_qcfg_resp.link_speed = + phy_qcfg_resp.force_link_speed; + phy_qcfg_resp.duplex = + PORT_PHY_QCFG_RESP_DUPLEX_FULL; + phy_qcfg_resp.pause = + (PORT_PHY_QCFG_RESP_PAUSE_TX | + PORT_PHY_QCFG_RESP_PAUSE_RX); + } + } else { + /* force 
link down */ + phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK; + phy_qcfg_resp.link_speed = 0; + phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF; + phy_qcfg_resp.pause = 0; + } + rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, + phy_qcfg_req->resp_addr, + phy_qcfg_req->cmpl_ring, + sizeof(phy_qcfg_resp)); + } + return rc; +} + +static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + int rc = 0; + struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr; + u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff; + + switch (req_type) { + case HWRM_CFA_L2_FILTER_ALLOC: + rc = bnxt_vf_validate_set_mac(bp, vf); + break; + case HWRM_FUNC_CFG: + /* TODO Validate if VF is allowed to change mac address, + * mtu, num of rings etc + */ + rc = bnxt_hwrm_exec_fwd_resp( + bp, vf, sizeof(struct hwrm_func_cfg_input)); + break; + case HWRM_PORT_PHY_QCFG: + rc = bnxt_vf_set_link(bp, vf); + break; + default: + break; + } + return rc; +} + +void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) +{ + u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id; + + /* Scan through VF's and process commands */ + while (1) { + vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i); + if (vf_id >= active_vfs) + break; + + clear_bit(vf_id, bp->pf.vf_event_bmap); + bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]); + i = vf_id + 1; + } +} + +void bnxt_update_vf_mac(struct bnxt *bp) +{ + struct hwrm_func_qcaps_input req = {0}; + struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); + req.fid = cpu_to_le16(0xffff); + + mutex_lock(&bp->hwrm_cmd_lock); + if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + goto update_vf_mac_exit; + + if (!is_valid_ether_addr(resp->perm_mac_address)) + goto update_vf_mac_exit; + + if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) + goto update_vf_mac_exit; + + memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); + memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); +update_vf_mac_exit: + mutex_unlock(&bp->hwrm_cmd_lock); +} + +#else + +void bnxt_sriov_disable(struct bnxt *bp) +{ +} + +void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) +{ + netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n"); +} + +void bnxt_update_vf_mac(struct bnxt *bp) +{ +} +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h new file mode 100644 index 000000000000..c151280e3980 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -0,0 +1,23 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_SRIOV_H +#define BNXT_SRIOV_H + +int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *); +int bnxt_set_vf_mac(struct net_device *, int, u8 *); +int bnxt_set_vf_vlan(struct net_device *, int, u16, u8); +int bnxt_set_vf_bw(struct net_device *, int, int, int); +int bnxt_set_vf_link_state(struct net_device *, int, int); +int bnxt_set_vf_spoofchk(struct net_device *, int, bool); +int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); +void bnxt_sriov_disable(struct bnxt *); +void bnxt_hwrm_exec_fwd_req(struct bnxt *); +void bnxt_update_vf_mac(struct bnxt *); +#endif diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 1a3988f51305..50f63b7f3c3e 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -793,7 +793,6 @@ static void bcmgenet_get_drvinfo(struct net_device *dev, { strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); strlcpy(info->version, "v2.0", sizeof(info->version)); - info->n_stats = BCMGENET_STATS_LEN; } static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) @@ -1832,6 +1831,24 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); } +static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) +{ + u32 int0_enable = 0; + + /* Monitor cable plug/unplugged event for internal PHY, external PHY + * and MoCA PHY + */ + if (priv->internal_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->ext_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + } + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); +} + static int init_umac(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; @@ -1872,15 +1889,8 @@ static int init_umac(struct bcmgenet_priv *priv) /* Enable Tx default queue 16 interrupts */ int0_enable |= UMAC_IRQ_TXDMA_DONE; - /* Monitor cable plug/unplugged event for internal PHY */ - if (priv->internal_phy) { - int0_enable |= UMAC_IRQ_LINK_EVENT; - } else if (priv->ext_phy) { - int0_enable |= UMAC_IRQ_LINK_EVENT; - } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { - if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) - int0_enable |= UMAC_IRQ_LINK_EVENT; - + /* Configure backpressure vectors for MoCA */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { reg = bcmgenet_bp_mc_get(priv); reg |= BIT(priv->hw_params->bp_in_en_shift); @@ -2794,6 +2804,9 @@ static void bcmgenet_netif_start(struct net_device *dev) netif_tx_start_all_queues(dev); + /* Monitor link interrupts now */ + bcmgenet_link_intr_enable(priv); + phy_start(priv->phydev); } diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 9b35d142f47a..8fb84e69c30e 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -3,7 +3,7 @@ # config NET_VENDOR_CAVIUM - tristate "Cavium ethernet drivers" + bool "Cavium ethernet drivers" depends on PCI default y ---help--- diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 29f330831784..245c063ed4db 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -153,7 +153,6 @@ lio_get_drvinfo(struct net_device 
*netdev, struct ethtool_drvinfo *drvinfo) strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, ETHTOOL_FWVERS_LEN); strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); - drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN; } static void diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 02e4e028a647..a077f9476daf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -35,79 +35,79 @@ static void set_msglevel(struct net_device *dev, u32 val) } static const char stats_strings[][ETH_GSTRING_LEN] = { - "TxOctetsOK ", - "TxFramesOK ", - "TxBroadcastFrames ", - "TxMulticastFrames ", - "TxUnicastFrames ", - "TxErrorFrames ", - - "TxFrames64 ", - "TxFrames65To127 ", - "TxFrames128To255 ", - "TxFrames256To511 ", - "TxFrames512To1023 ", - "TxFrames1024To1518 ", - "TxFrames1519ToMax ", - - "TxFramesDropped ", - "TxPauseFrames ", - "TxPPP0Frames ", - "TxPPP1Frames ", - "TxPPP2Frames ", - "TxPPP3Frames ", - "TxPPP4Frames ", - "TxPPP5Frames ", - "TxPPP6Frames ", - "TxPPP7Frames ", - - "RxOctetsOK ", - "RxFramesOK ", - "RxBroadcastFrames ", - "RxMulticastFrames ", - "RxUnicastFrames ", - - "RxFramesTooLong ", - "RxJabberErrors ", - "RxFCSErrors ", - "RxLengthErrors ", - "RxSymbolErrors ", - "RxRuntFrames ", - - "RxFrames64 ", - "RxFrames65To127 ", - "RxFrames128To255 ", - "RxFrames256To511 ", - "RxFrames512To1023 ", - "RxFrames1024To1518 ", - "RxFrames1519ToMax ", - - "RxPauseFrames ", - "RxPPP0Frames ", - "RxPPP1Frames ", - "RxPPP2Frames ", - "RxPPP3Frames ", - "RxPPP4Frames ", - "RxPPP5Frames ", - "RxPPP6Frames ", - "RxPPP7Frames ", - - "RxBG0FramesDropped ", - "RxBG1FramesDropped ", - "RxBG2FramesDropped ", - "RxBG3FramesDropped ", - "RxBG0FramesTrunc ", - "RxBG1FramesTrunc ", - "RxBG2FramesTrunc ", - "RxBG3FramesTrunc ", - - "TSO ", - "TxCsumOffload ", - "RxCsumGood ", - "VLANextractions ", - "VLANinsertions ", - "GROpackets ", - "GROmerged ", + "tx_octets_ok ", + "tx_frames_ok ", + "tx_broadcast_frames ", + "tx_multicast_frames ", + "tx_unicast_frames ", + "tx_error_frames ", + + "tx_frames_64 ", + "tx_frames_65_to_127 ", + "tx_frames_128_to_255 ", + "tx_frames_256_to_511 ", + "tx_frames_512_to_1023 ", + "tx_frames_1024_to_1518 ", + "tx_frames_1519_to_max ", + + "tx_frames_dropped ", + "tx_pause_frames ", + "tx_ppp0_frames ", + "tx_ppp1_frames ", + "tx_ppp2_frames ", + "tx_ppp3_frames ", + "tx_ppp4_frames ", + "tx_ppp5_frames ", + "tx_ppp6_frames ", + "tx_ppp7_frames ", + + "rx_octets_ok ", + "rx_frames_ok ", + "rx_broadcast_frames ", + "rx_multicast_frames ", + "rx_unicast_frames ", + + "rx_frames_too_long ", + "rx_jabber_errors ", + "rx_fcs_errors ", + "rx_length_errors ", + "rx_symbol_errors ", + "rx_runt_frames ", + + "rx_frames_64 ", + "rx_frames_65_to_127 ", + "rx_frames_128_to_255 ", + "rx_frames_256_to_511 ", + "rx_frames_512_to_1023 ", + "rx_frames_1024_to_1518 ", + "rx_frames_1519_to_max ", + + "rx_pause_frames ", + "rx_ppp0_frames ", + "rx_ppp1_frames ", + "rx_ppp2_frames ", + "rx_ppp3_frames ", + "rx_ppp4_frames ", + "rx_ppp5_frames ", + "rx_ppp6_frames ", + "rx_ppp7_frames ", + + "rx_bg0_frames_dropped ", + "rx_bg1_frames_dropped ", + "rx_bg2_frames_dropped ", + "rx_bg3_frames_dropped ", + "rx_bg0_frames_trunc ", + "rx_bg1_frames_trunc ", + "rx_bg2_frames_trunc ", + "rx_bg3_frames_trunc ", + + "tso ", + "tx_csum_offload ", + "rx_csum_good ", + "vlan_extractions ", + "vlan_insertions ", + "gro_packets ", + "gro_merged ", }; static 
char adapter_stats_strings[][ETH_GSTRING_LEN] = { @@ -211,8 +211,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) sizeof(info->version)); strlcpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); + info->regdump_len = get_regs_len(dev); - if (adapter->params.fw_vers) + if (!adapter->params.fw_vers) + strcpy(info->fw_version, "N/A"); + else snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u.%u, TP %u.%u.%u.%u", FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), @@ -612,6 +615,8 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct port_info *p = netdev_priv(dev); struct link_config *lc = &p->link_cfg; u32 speed = ethtool_cmd_speed(cmd); + struct link_config old_lc; + int ret; if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ return -EINVAL; @@ -626,13 +631,11 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return -EINVAL; } + old_lc = *lc; if (cmd->autoneg == AUTONEG_DISABLE) { cap = speed_to_caps(speed); - if (!(lc->supported & cap) || - (speed == 1000) || - (speed == 10000) || - (speed == 40000)) + if (!(lc->supported & cap)) return -EINVAL; lc->requested_speed = cap; lc->advertising = 0; @@ -645,10 +648,14 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } lc->autoneg = cmd->autoneg; - if (netif_running(dev)) - return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan, - lc); - return 0; + /* If the firmware rejects the Link Configuration request, back out + * the changes and report the error. + */ + ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc); + if (ret) + *lc = old_lc; + + return ret; } static void get_pauseparam(struct net_device *dev, @@ -847,7 +854,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, { int i, err = 0; struct adapter *adapter = netdev2adap(dev); - u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); + u8 *buf = t4_alloc_mem(EEPROMSIZE); if (!buf) return -ENOMEM; @@ -858,7 +865,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, if (!err) memcpy(data, buf + e->offset, e->len); - kfree(buf); + t4_free_mem(buf); return err; } @@ -887,7 +894,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { /* RMW possibly needed for first or last words. 
*/ - buf = kmalloc(aligned_len, GFP_KERNEL); + buf = t4_alloc_mem(aligned_len); if (!buf) return -ENOMEM; err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); @@ -915,7 +922,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, err = t4_seeprom_wp(adapter, true); out: if (buf != data) - kfree(buf); + t4_free_mem(buf); return err; } @@ -1011,11 +1018,15 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, if (!p) return 0; - for (i = 0; i < pi->rss_size; i++) - pi->rss[i] = p[i]; - if (pi->adapter->flags & FULL_INIT_DONE) + /* Interface must be brought up atleast once */ + if (pi->adapter->flags & FULL_INIT_DONE) { + for (i = 0; i < pi->rss_size; i++) + pi->rss[i] = p[i]; + return cxgb4_write_rss(pi, pi->rss); - return 0; + } + + return -EPERM; } static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c29227ee9ee8..2cf81857a297 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -83,7 +83,7 @@ char cxgb4_driver_name[] = KBUILD_MODNAME; #endif #define DRV_VERSION "2.0.0-ko" const char cxgb4_driver_version[] = DRV_VERSION; -#define DRV_DESC "Chelsio T4/T5 Network Driver" +#define DRV_DESC "Chelsio T4/T5/T6 Network Driver" /* Host shadow copy of ingress filter entry. This is in host native format * and doesn't match the ordering or bit order, etc. of the hardware of the @@ -151,6 +151,7 @@ MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); MODULE_FIRMWARE(FW4_FNAME); MODULE_FIRMWARE(FW5_FNAME); +MODULE_FIRMWARE(FW6_FNAME); /* * Normally we're willing to become the firmware's Master PF but will be happy @@ -4485,6 +4486,10 @@ static int enable_msix(struct adapter *adap) } for (i = 0; i < allocated; ++i) adap->msix_info[i].vec = entries[i].vector; + dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " + "nic %d iscsi %d rdma cpl %d rdma ciq %d\n", + allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs, + s->rdmaciqs); kfree(entries); return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index b2b5e5bbe04c..0cfa5d72cafd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -56,7 +56,7 @@ * Generic information about the driver. */ #define DRV_VERSION "2.0.0-ko" -#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver" +#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver" /* * Module Parameters. 
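Usage note on the set_rss_table() change above: an RSS indirection table supplied via `ethtool -X` is now copied into pi->rss[] and written to hardware only once the adapter has completed its first bring-up (FULL_INIT_DONE); before that the request is rejected with -EPERM, where previously the values were only cached and never pushed to the chip. A typical sequence is therefore to bring the interface up once and then run something like `ethtool -X eth0 equal 8` (interface name and queue count are illustrative).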
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index a02ecc4f9002..cadcee645f74 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -1597,7 +1597,6 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info) strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info)); - info->eedump_len = DE_EEPROM_SIZE; } static int de_get_regs_len(struct net_device *dev) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 821540913343..d463563e1f70 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -592,6 +592,7 @@ struct be_adapter { int be_get_temp_freq; struct be_hwmon hwmon_info; u8 pf_number; + u8 pci_func_num; struct rss_info rss_info; /* Filters for packets that need to be sent to BMC */ u32 bmc_filt_mask; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index eb323913cd39..1795c935ff02 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -851,8 +851,10 @@ static int be_cmd_notify_wait(struct be_adapter *adapter, return status; dest_wrb = be_cmd_copy(adapter, wrb); - if (!dest_wrb) - return -EBUSY; + if (!dest_wrb) { + status = -EBUSY; + goto unlock; + } if (use_mcc(adapter)) status = be_mcc_notify_wait(adapter); @@ -862,6 +864,7 @@ static int be_cmd_notify_wait(struct be_adapter *adapter, if (!status) memcpy(wrb, dest_wrb, sizeof(*wrb)); +unlock: be_cmd_unlock(adapter); return status; } @@ -1984,6 +1987,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) be_if_cap_flags(adapter)); } flags &= be_if_cap_flags(adapter); + if (!flags) + return -ENOTSUPP; return __be_cmd_rx_filter(adapter, flags, value); } @@ -2887,6 +2892,7 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) if (!status) { attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); adapter->hba_port_num = attribs->hba_attribs.phy_port; + adapter->pci_func_num = attribs->pci_func_num; serial_num = attribs->hba_attribs.controller_serial_number; for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++) adapter->serial_num[i] = le32_to_cpu(serial_num[i]) & @@ -3709,7 +3715,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) status = -EINVAL; goto err; } - adapter->pf_number = desc->pf_num; be_copy_nic_desc(res, desc); } @@ -3721,7 +3726,10 @@ err: return status; } -/* Will use MBOX only if MCCQ has not been created */ +/* Will use MBOX only if MCCQ has not been created + * non-zero domain => a PF is querying this on behalf of a VF + * zero domain => a PF or a VF is querying this for itself + */ int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_resources *res, u8 query, u8 domain) { @@ -3748,10 +3756,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, OPCODE_COMMON_GET_PROFILE_CONFIG, cmd.size, &wrb, &cmd); - req->hdr.domain = domain; if (!lancer_chip(adapter)) req->hdr.version = 1; req->type = ACTIVE_PROFILE_TYPE; + /* When a function is querying profile information relating to + * itself hdr.pf_number must be set to it's pci_func_num + 1 + */ + req->hdr.domain = domain; + if (domain == 0) + req->hdr.pf_num = adapter->pci_func_num + 1; /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the * 
descriptors with all bits set to "1" for the fields which can be @@ -3921,12 +3934,16 @@ static void be_fill_vf_res_template(struct be_adapter *adapter, vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS); } - - nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); } else { num_vf_qs = 1; } + if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { + nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); + vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } + + nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); nic_vft->rq_count = cpu_to_le16(num_vf_qs); nic_vft->txq_count = cpu_to_le16(num_vf_qs); nic_vft->rssq_count = cpu_to_le16(num_vf_qs); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 7d178bdb112e..91155ea74f34 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -289,7 +289,9 @@ struct be_cmd_req_hdr { u32 timeout; /* dword 1 */ u32 request_length; /* dword 2 */ u8 version; /* dword 3 */ - u8 rsvd[3]; /* dword 3 */ + u8 rsvd1; /* dword 3 */ + u8 pf_num; /* dword 3 */ + u8 rsvd2; /* dword 3 */ }; #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ @@ -1652,7 +1654,11 @@ struct mgmt_hba_attribs { struct mgmt_controller_attrib { struct mgmt_hba_attribs hba_attribs; - u32 rsvd0[10]; + u32 rsvd0[2]; + u16 rsvd1; + u8 pci_func_num; + u8 rsvd2; + u32 rsvd3[7]; } __packed; struct be_cmd_req_cntl_attribs { diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 2c9ed1710ba6..f4cb8e425853 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -234,9 +234,6 @@ static void be_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->testinfo_len = 0; - drvinfo->regdump_len = 0; - drvinfo->eedump_len = 0; } static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 7bf51a1a0a77..eb48a977f8da 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1123,11 +1123,12 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, struct be_wrb_params *wrb_params) { - /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or - * less may cause a transmit stall on that port. So the work-around is - * to pad short packets (<= 32 bytes) to a 36-byte length. + /* Lancer, SH and BE3 in SRIOV mode have a bug wherein + * packets that are 32b or less may cause a transmit stall + * on that port. The workaround is to pad such packets + * (len <= 32 bytes) to a minimum length of 36b. 
*/ - if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { + if (skb->len <= 32) { if (skb_put_padto(skb, 36)) return NULL; } @@ -4205,10 +4206,6 @@ static int be_get_config(struct be_adapter *adapter) int status, level; u16 profile_id; - status = be_cmd_get_cntl_attributes(adapter); - if (status) - return status; - status = be_cmd_query_fw_cfg(adapter); if (status) return status; @@ -4407,6 +4404,11 @@ static int be_setup(struct be_adapter *adapter) if (!lancer_chip(adapter)) be_cmd_req_native_mode(adapter); + /* Need to invoke this cmd first to get the PCI Function Number */ + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; + if (!BE2_chip(adapter) && be_physfn(adapter)) be_alloc_sriov_res(adapter); @@ -4999,7 +5001,15 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter, return false; } - return (fhdr->asic_type_rev >= adapter->asic_rev); + /* In BE3 FW images the "asic_type_rev" field doesn't track the + * asic_rev of the chips it is compatible with. + * When asic_type_rev is 0 the image is compatible only with + * pre-BE3-R chips (asic_rev < 0x10) + */ + if (BEx_chip(adapter) && fhdr->asic_type_rev == 0) + return adapter->asic_rev < 0x10; + else + return (fhdr->asic_type_rev >= adapter->asic_rev); } static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 3c40f6b99224..55c36230e176 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -198,11 +198,13 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus) #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) /* + * Return the TBIPA address, starting from the address + * of the mapped GFAR MDIO registers (struct gfar) * This is mildly evil, but so is our hardware for doing this. * Also, we have to cast back to struct gfar because of * definition weirdness done in gianfar.h. 
*/ -static uint32_t __iomem *get_gfar_tbipa(void __iomem *p) +static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p) { struct gfar __iomem *enet_regs = p; @@ -210,6 +212,15 @@ static uint32_t __iomem *get_gfar_tbipa(void __iomem *p) } /* + * Return the TBIPA address, starting from the address + * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar) + */ +static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p) +{ + return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs)); +} + +/* * Return the TBIPAR address for an eTSEC2 node */ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p) @@ -220,11 +231,12 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p) #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) /* - * Return the TBIPAR address for a QE MDIO node + * Return the TBIPAR address for a QE MDIO node, starting from the address + * of the mapped MII registers (struct fsl_pq_mii) */ static uint32_t __iomem *get_ucc_tbipa(void __iomem *p) { - struct fsl_pq_mdio __iomem *mdio = p; + struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii); return &mdio->utbipar; } @@ -300,14 +312,14 @@ static const struct of_device_id fsl_pq_mdio_match[] = { .compatible = "fsl,gianfar-tbi", .data = &(struct fsl_pq_mdio_data) { .mii_offset = 0, - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mii, }, }, { .compatible = "fsl,gianfar-mdio", .data = &(struct fsl_pq_mdio_data) { .mii_offset = 0, - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mii, }, }, { @@ -315,7 +327,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = { .compatible = "gianfar", .data = &(struct fsl_pq_mdio_data) { .mii_offset = offsetof(struct fsl_pq_mdio, mii), - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mdio, }, }, { @@ -445,6 +457,16 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) tbipa = data->get_tbipa(priv->map); + /* + * Add consistency check to make sure TBI is contained + * within the mapped range (not because we would get a + * segfault, rather to catch bugs in computing TBI + * address). Print error message but continue anyway. 
+ */ + if ((void *)tbipa > priv->map + resource_size(&res) - 4) + dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", + ((void *)tbipa - priv->map) + 4); + iowrite32be(be32_to_cpup(prop), tbipa); } } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7f5389c3c0cf..47f0400ad020 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -107,7 +107,7 @@ #include "gianfar.h" -#define TX_TIMEOUT (1*HZ) +#define TX_TIMEOUT (5*HZ) const char gfar_driver_version[] = "2.0"; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index fb7f8d67aef4..928ca2bdd238 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -182,8 +182,6 @@ static void gfar_gdrvinfo(struct net_device *dev, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = 0; - drvinfo->eedump_len = 0; } diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index cc83350d56ba..89714f5e0dfc 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -351,8 +351,6 @@ uec_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info)); - drvinfo->eedump_len = 0; - drvinfo->regdump_len = uec_get_regs_len(netdev); } #ifdef CONFIG_PM diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 8d12b587809e..f250dec488fd 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_HISILICON bool "Hisilicon devices" default y - depends on ARM || ARM64 + depends on OF && (ARM || ARM64 || COMPILE_TEST) ---help--- If you have a network (Ethernet) card belonging to this class, say Y. 
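Returning to the fsl_pq_mdio hunks above: the new get_gfar_tbipa_from_mii() and get_ucc_tbipa() helpers recover the start of the enclosing register block from a pointer to its embedded MII sub-block via container_of(), instead of assuming the two addresses coincide. A minimal, self-contained sketch of that pattern (the structures and names below are invented for illustration, not the real gianfar/QE layout):

#include <linux/kernel.h>	/* container_of() */
#include <linux/types.h>

struct example_mii {
	u32 miimcfg;
	u32 miimcom;
};

struct example_regs {
	u32 ctrl;
	struct example_mii mii;	/* sub-block whose address callers are given */
	u32 tbipa;		/* register we actually want to reach */
};

/* Step back from the embedded MII sub-block to the enclosing register
 * block, then return the address of its TBIPA register.
 */
static u32 __iomem *example_tbipa_from_mii(void __iomem *p)
{
	struct example_regs __iomem *regs =
		container_of(p, struct example_regs, mii);

	return &regs->tbipa;
}

The bounds check added in fsl_pq_mdio_probe() then only has to verify that the computed TBIPA address still lies inside the ioremapped resource.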
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index a5e077eac99a..e51892d518ff 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -371,7 +371,7 @@ static void hix5hd2_port_enable(struct hix5hd2_priv *priv) static void hix5hd2_port_disable(struct hix5hd2_priv *priv) { - writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN); + writel_relaxed(~(u32)(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN); writel_relaxed(0, priv->base + DESC_WR_RD_ENA); } diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index f52e99acf463..b3645297477e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -436,60 +436,10 @@ void hnae_ae_unregister(struct hnae_ae_dev *hdev) } EXPORT_SYMBOL(hnae_ae_unregister); -static ssize_t handles_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - ssize_t s = 0; - struct hnae_ae_dev *hdev = cls_to_ae_dev(dev); - struct hnae_handle *h; - int i = 0, j; - - list_for_each_entry_rcu(h, &hdev->handle_list, node) { - s += sprintf(buf + s, "handle %d (eport_id=%u from %s):\n", - i++, h->eport_id, h->dev->name); - for (j = 0; j < h->q_num; j++) { - s += sprintf(buf + s, "\tqueue[%d] on %p\n", - j, h->qs[i]->io_base); -#define HANDEL_TX_MSG "\t\ttx_ring on %p:%u,%u,%u,%u,%u,%llu,%llu\n" - s += sprintf(buf + s, - HANDEL_TX_MSG, - h->qs[i]->tx_ring.io_base, - h->qs[i]->tx_ring.buf_size, - h->qs[i]->tx_ring.desc_num, - h->qs[i]->tx_ring.max_desc_num_per_pkt, - h->qs[i]->tx_ring.max_raw_data_sz_per_desc, - h->qs[i]->tx_ring.max_pkt_size, - h->qs[i]->tx_ring.stats.sw_err_cnt, - h->qs[i]->tx_ring.stats.io_err_cnt); - s += sprintf(buf + s, - "\t\trx_ring on %p:%u,%u,%llu,%llu,%llu\n", - h->qs[i]->rx_ring.io_base, - h->qs[i]->rx_ring.buf_size, - h->qs[i]->rx_ring.desc_num, - h->qs[i]->rx_ring.stats.sw_err_cnt, - h->qs[i]->rx_ring.stats.io_err_cnt, - h->qs[i]->rx_ring.stats.seg_pkt_cnt); - } - } - - return s; -} - -static DEVICE_ATTR_RO(handles); -static struct attribute *hnae_class_attrs[] = { - &dev_attr_handles.attr, - NULL, -}; -ATTRIBUTE_GROUPS(hnae_class); - static int __init hnae_init(void) { hnae_class = class_create(THIS_MODULE, "hnae"); - if (IS_ERR(hnae_class)) - return PTR_ERR(hnae_class); - - hnae_class->dev_groups = hnae_class_groups; - return 0; + return PTR_ERR_OR_ZERO(hnae_class); } static void __exit hnae_exit(void) diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index d4a1eb1b8e54..cec95ac8687d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -430,6 +430,7 @@ struct hnae_ae_ops { void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout); int (*set_coalesce_frames)(struct hnae_handle *handle, u32 coalesce_frames); + void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); int (*get_mac_addr)(struct hnae_handle *handle, void **p); int (*set_mac_addr)(struct hnae_handle *handle, void *p); int (*set_mc_addr)(struct hnae_handle *handle, void *addr); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index a2c72f84e397..1a16c0307b47 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -392,6 +392,11 @@ static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable) return 
hns_mac_set_autoneg(hns_get_mac_cb(handle), enable); } +static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en) +{ + hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en); +} + static int hns_ae_get_autoneg(struct hnae_handle *handle) { u32 auto_neg; @@ -748,6 +753,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames, .set_coalesce_usecs = hns_ae_set_coalesce_usecs, .set_coalesce_frames = hns_ae_set_coalesce_frames, + .set_promisc_mode = hns_ae_set_promisc_mode, .set_mac_addr = hns_ae_set_mac_address, .set_mc_addr = hns_ae_set_multicast_one, .set_mtu = hns_ae_set_mtu, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 95bf42aae24c..026b38676cba 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -179,7 +179,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, return -EINVAL; } } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) { - if (mac_cb->mac_id <= DSAF_MAX_PORT_NUM_PER_CHIP) { + if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) { dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d!\n", mac_cb->dsaf_dev->ae_dev.name, @@ -744,9 +744,11 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx) mac_cb->serdes_vaddr = dsaf_dev->sds_base; if (dsaf_dev->cpld_base && - mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) + mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) { mac_cb->cpld_vaddr = dsaf_dev->cpld_base + mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET; + cpld_led_reset(mac_cb); + } mac_cb->sfp_prsnt = 0; mac_cb->txpkt_for_led = 0; mac_cb->rxpkt_for_led = 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 26ae6c64d74c..2a98eba660c0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -17,6 +17,8 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/device.h> +#include <linux/vmalloc.h> + #include "hns_dsaf_main.h" #include "hns_dsaf_rcb.h" #include "hns_dsaf_ppe.h" @@ -217,6 +219,25 @@ hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg) } } +static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev) +{ + u16 max_q_per_vf, max_vfn; + u32 q_id, q_num_per_port; + u32 i; + + hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, + HNS_DSAF_COMM_SERVICE_NW_IDX, + &max_vfn, &max_q_per_vf); + q_num_per_port = max_vfn * max_q_per_vf; + + for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) { + dsaf_set_dev_field(dsaf_dev, + DSAF_MIX_DEF_QID_0_REG + 0x0004 * i, + 0xff, 0, q_id); + q_id += q_num_per_port; + } +} + /** * hns_dsaf_sw_port_type_cfg - cfg sw type * @dsaf_id: dsa fabric id @@ -592,6 +613,11 @@ static void hns_dsaf_tbl_tcam_data_ucast_pul( dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul); } +void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) +{ + dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en); +} + /** * hns_dsaf_tbl_stat_en - tbl * @dsaf_id: dsa fabric id @@ -920,6 +946,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) /* set 22 queue per tx ppe engine, only used in switch mode */ hns_dsaf_ppe_qid_cfg(dsaf_dev, DSAF_DEFAUTL_QUEUE_NUM_PER_PPE); + /* set promisc def queue id */ + hns_dsaf_mix_def_qid_cfg(dsaf_dev); + /* in non switch mode, set all port to access mode */ hns_dsaf_sw_port_type_cfg(dsaf_dev, 
DSAF_SW_PORT_TYPE_NON_VLAN); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 315b07ecd291..b2b93484995c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -423,5 +423,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port); void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); int hns_dsaf_get_regs_count(void); +void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en); #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index d611388aecef..523e9b83d304 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -64,17 +64,10 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb, switch (status) { case HNAE_LED_ACTIVE: mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr); - return 2; - case HNAE_LED_ON: dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE); dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value); - break; - case HNAE_LED_OFF: - dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, - CPLD_LED_DEFAULT_VALUE); - dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value); - break; + return 2; case HNAE_LED_INACTIVE: dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, CPLD_LED_DEFAULT_VALUE); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 05ea244d999c..4db32c62f062 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -575,8 +575,8 @@ int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, *@max_vfn : max vfn number *@max_q_per_vf:max ring number per vm */ -static void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, - u16 *max_vfn, u16 *max_q_per_vf) +void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, + u16 *max_vfn, u16 *max_q_per_vf) { if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { switch (dsaf_mode) { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index c7db6130a3cf..3a2afe2dd8bb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -107,6 +107,8 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common); void hns_rcb_start(struct hnae_queue *q, u32 val); void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common); void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common); +void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, + u16 *max_vfn, u16 *max_q_per_vf); void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val); void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index dab5ecf382a0..802d55457f19 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -51,9 +51,9 @@ static const struct mac_stats_string g_xgmac_stats_string[] = { {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)}, {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)}, - {"xgmac_rx_not_well_pkt", MAC_STATS_FIELD_OFF(rx_fragment_err)}, - {"xgmac_rx_good_well_pkt", 
MAC_STATS_FIELD_OFF(rx_undersize)}, - {"xgmac_rx_total_pkt", MAC_STATS_FIELD_OFF(rx_under_min)}, + {"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)}, + {"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)}, + {"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)}, {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)}, {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)}, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index ce7f2e0e3fd1..302d3ae8e9e5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1161,6 +1161,21 @@ void hns_set_multicast_list(struct net_device *ndev) } } +void hns_nic_set_rx_mode(struct net_device *ndev) +{ + struct hns_nic_priv *priv = netdev_priv(ndev); + struct hnae_handle *h = priv->ae_handle; + + if (h->dev->ops->set_promisc_mode) { + if (ndev->flags & IFF_PROMISC) + h->dev->ops->set_promisc_mode(h, 1); + else + h->dev->ops->set_promisc_mode(h, 0); + } + + hns_set_multicast_list(ndev); +} + struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { @@ -1220,7 +1235,7 @@ static const struct net_device_ops hns_nic_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = hns_nic_poll_controller, #endif - .ndo_set_rx_mode = hns_set_multicast_list, + .ndo_set_rx_mode = hns_nic_set_rx_mode, }; static void hns_nic_update_link_status(struct net_device *netdev) @@ -1300,16 +1315,15 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) return; hns_nic_dump(priv); - netdev_err(priv->netdev, "Reset %s port\n", - (type == HNAE_PORT_DEBUG ? "debug" : "business")); + netdev_info(priv->netdev, "Reset %s port\n", + (type == HNAE_PORT_DEBUG ? 
"debug" : "business")); rtnl_lock(); - if (type == HNAE_PORT_DEBUG) { + /* put off any impending NetWatchDogTimeout */ + priv->netdev->trans_start = jiffies; + + if (type == HNAE_PORT_DEBUG) hns_nic_net_reinit(priv->netdev); - } else { - hns_nic_net_down(priv->netdev); - hns_nic_net_reset(priv->netdev); - } rtnl_unlock(); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 2550208cb22e..a0332129970b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -194,9 +194,7 @@ static int hns_nic_set_settings(struct net_device *net_dev, { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_handle *h; - int link_stat; u32 speed; - u8 duplex, autoneg; if (!netif_running(net_dev)) return -ESRCH; @@ -206,48 +204,35 @@ static int hns_nic_set_settings(struct net_device *net_dev, return -ENODEV; h = priv->ae_handle; - link_stat = hns_nic_get_link(net_dev); - duplex = cmd->duplex; speed = ethtool_cmd_speed(cmd); - autoneg = cmd->autoneg; - - if (!link_stat) { - if (duplex != (u8)DUPLEX_UNKNOWN || speed != (u32)SPEED_UNKNOWN) - return -EINVAL; - - if (h->phy_if == PHY_INTERFACE_MODE_SGMII && h->phy_node) { - priv->phy->autoneg = autoneg; - return phy_start_aneg(priv->phy); - } - } if (h->phy_if == PHY_INTERFACE_MODE_XGMII) { - if (autoneg != AUTONEG_DISABLE) - return -EINVAL; - - if (speed != SPEED_10000 || duplex != DUPLEX_FULL) + if (cmd->autoneg == AUTONEG_ENABLE || speed != SPEED_10000 || + cmd->duplex != DUPLEX_FULL) return -EINVAL; } else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) { - if (!h->phy_node && autoneg != AUTONEG_DISABLE) + if (!priv->phy && cmd->autoneg == AUTONEG_ENABLE) return -EINVAL; - if (speed == SPEED_1000 && duplex == DUPLEX_HALF) + if (speed == SPEED_1000 && cmd->duplex == DUPLEX_HALF) return -EINVAL; + if (priv->phy) + return phy_ethtool_sset(priv->phy, cmd); - if (speed != SPEED_10 && speed != SPEED_100 && - speed != SPEED_1000) + if ((speed != SPEED_10 && speed != SPEED_100 && + speed != SPEED_1000) || (cmd->duplex != DUPLEX_HALF && + cmd->duplex != DUPLEX_FULL)) return -EINVAL; } else { netdev_err(net_dev, "Not supported!"); return -ENOTSUPP; } - if (priv->phy) { - return phy_ethtool_sset(priv->phy, cmd); - } else if (h->dev->ops->adjust_link && link_stat) { - h->dev->ops->adjust_link(h, speed, duplex); + if (h->dev->ops->adjust_link) { + h->dev->ops->adjust_link(h, (int)speed, cmd->duplex); return 0; } + netdev_err(net_dev, "Not supported!"); return -ENOTSUPP; } @@ -682,7 +667,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN); - drvinfo->eedump_len = 0; } /** diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index e4ec52ae61ff..37491c85bc42 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -11,6 +11,7 @@ #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/netdevice.h> @@ -20,6 +21,7 @@ #include <linux/of_platform.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/regmap.h> #include <linux/spinlock_types.h> #define MDIO_DRV_NAME "Hi-HNS_MDIO" @@ -36,7 +38,7 @@ struct hns_mdio_device { void *vbase; /* mdio reg base address */ - void *sys_vbase; + struct regmap 
*subctrl_vbase; }; /* mdio reg */ @@ -155,10 +157,10 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev, u32 time_cnt; u32 reg_value; - mdio_write_reg((void *)mdio_dev->sys_vbase, cfg_reg, set_val); + regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val); for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) { - reg_value = mdio_read_reg((void *)mdio_dev->sys_vbase, st_reg); + regmap_read(mdio_dev->subctrl_vbase, st_reg, ®_value); reg_value &= st_msk; if ((!!check_st) == (!!reg_value)) break; @@ -352,7 +354,7 @@ static int hns_mdio_reset(struct mii_bus *bus) struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; int ret; - if (!mdio_dev->sys_vbase) { + if (!mdio_dev->subctrl_vbase) { dev_err(&bus->dev, "mdio sys ctl reg has not maped\n"); return -ENODEV; } @@ -455,13 +457,12 @@ static int hns_mdio_probe(struct platform_device *pdev) return ret; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - mdio_dev->sys_vbase = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mdio_dev->sys_vbase)) { - ret = PTR_ERR(mdio_dev->sys_vbase); - return ret; + mdio_dev->subctrl_vbase = + syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0)); + if (IS_ERR(mdio_dev->subctrl_vbase)) { + dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n"); + mdio_dev->subctrl_vbase = NULL; } - new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); if (!new_bus->irq) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index b60a34d982a9..5d7db6c01c46 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2204,7 +2204,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, strlcpy(info->version, DRV_VERSION, sizeof(info->version)); snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s", dev->cell_index, dev->ofdev->dev.of_node->full_name); - info->regdump_len = emac_ethtool_get_regs_len(ndev); } static const struct ethtool_ops emac_ethtool_ops = { diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 4270ad2d4ddf..83e557c7f279 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -559,8 +559,6 @@ static void e1000_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = e1000_get_regs_len(netdev); - drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } static void e1000_get_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 74dc15055971..fd7be860c201 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3820,7 +3820,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) if (work_done < budget) { if (likely(adapter->itr_setting & 3)) e1000_set_itr(adapter); - napi_complete(napi); + napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->flags)) e1000_irq_enable(adapter); } diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index ad6daa656d3e..6cab1f30d41e 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -648,8 +648,6 @@ static void e1000_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 
sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = e1000_get_regs_len(netdev); - drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } static void e1000_get_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 2e2ddec04a50..0a854a47d31a 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2693,7 +2693,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight) if (work_done < weight) { if (adapter->itr_setting & 3) e1000_set_itr(adapter); - napi_complete(napi); + napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) ew32(IMS, adapter->rx_ring->ims_val); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 08ecf43dffc7..5304bc1fbecd 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -176,7 +176,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) return; /* Generate a folder for each q_vector */ - sprintf(name, "q_vector.%03d", q_vector->v_idx); + snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx); q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc); if (!q_vector->dbg_q_vector) @@ -186,7 +186,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) for (i = 0; i < q_vector->tx.count; i++) { struct fm10k_ring *ring = &q_vector->tx.ring[i]; - sprintf(name, "tx_ring.%03d", ring->queue_index); + snprintf(name, sizeof(name), "tx_ring.%03d", ring->queue_index); debugfs_create_file(name, 0600, q_vector->dbg_q_vector, ring, @@ -197,7 +197,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) for (i = 0; i < q_vector->rx.count; i++) { struct fm10k_ring *ring = &q_vector->rx.ring[i]; - sprintf(name, "rx_ring.%03d", ring->queue_index); + snprintf(name, sizeof(name), "rx_ring.%03d", ring->queue_index); debugfs_create_file(name, 0600, q_vector->dbg_q_vector, ring, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 4ef2fbd22911..2ce0eba5e040 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -206,13 +206,13 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) } for (i = 0; i < interface->hw.mac.max_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_packets", i); + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; } } @@ -515,10 +515,6 @@ static void fm10k_get_drvinfo(struct net_device *dev, sizeof(info->version) - 1); strncpy(info->bus_info, pci_name(interface->pdev), sizeof(info->bus_info) - 1); - - info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS); - - info->regdump_len = fm10k_get_regs_len(dev); } static void fm10k_get_pauseparam(struct net_device *dev, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 2f47bfe6cc90..e76a44cf330c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ 
b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -593,9 +593,9 @@ static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } -static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, - struct fm10k_ring *rx_ring, - int budget) +static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, + struct fm10k_ring *rx_ring, + int budget) { struct sk_buff *skb = rx_ring->skb; unsigned int total_bytes = 0, total_packets = 0; @@ -662,7 +662,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, q_vector->rx.total_packets += total_packets; q_vector->rx.total_bytes += total_bytes; - return total_packets < budget; + return total_packets; } #define VXLAN_HLEN (sizeof(struct udphdr) + 8) @@ -1422,7 +1422,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget) struct fm10k_q_vector *q_vector = container_of(napi, struct fm10k_q_vector, napi); struct fm10k_ring *ring; - int per_ring_budget; + int per_ring_budget, work_done = 0; bool clean_complete = true; fm10k_for_each_ring(ring, q_vector->tx) @@ -1436,16 +1436,19 @@ static int fm10k_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - fm10k_for_each_ring(ring, q_vector->rx) - clean_complete &= fm10k_clean_rx_irq(q_vector, ring, - per_ring_budget); + fm10k_for_each_ring(ring, q_vector->rx) { + int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget); + + work_done += work; + clean_complete &= !!(work < per_ring_budget); + } /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; /* all work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); /* re-enable the q_vector */ fm10k_qv_enable(q_vector); @@ -1905,7 +1908,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) u32 reta, base; /* If the netdev is initialized we have to maintain table if possible */ - if (interface->netdev->reg_state) { + if (interface->netdev->reg_state != NETREG_UNINITIALIZED) { for (i = FM10K_RETA_SIZE; i--;) { reta = interface->reta[i]; if ((((reta << 24) >> 24) < rss_i) && diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index f26dcb23ebf9..4dd3e26129b4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -93,7 +93,7 @@ #endif /* I40E_FCOE */ #define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_AQ_LEN 256 -#define I40E_AQ_WORK_LIMIT 32 +#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 @@ -102,11 +102,17 @@ /* Ethtool Private Flags */ #define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0) #define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) +#define I40E_PRIV_FLAGS_FD_ATR BIT(2) +#define I40E_PRIV_FLAGS_VEB_STATS BIT(3) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_HI_SHIFT 12 #define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) +#define I40E_OEM_VER_BUILD_MASK 0xffff +#define I40E_OEM_VER_PATCH_MASK 0xff +#define I40E_OEM_VER_BUILD_SHIFT 8 +#define I40E_OEM_VER_SHIFT 24 /* The values in here are decimal coded as hex as is the case in the NVM map*/ #define I40E_CURRENT_NVM_VERSION_HI 0x2 @@ -304,7 +310,6 @@ struct i40e_pf { #ifdef I40E_FCOE #define I40E_FLAG_FCOE_ENABLED BIT_ULL(11) #endif /* I40E_FCOE */ -#define I40E_FLAG_IN_NETPOLL BIT_ULL(12) #define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13) 
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) #define I40E_FLAG_FILTER_SYNC BIT_ULL(15) @@ -330,6 +335,7 @@ struct i40e_pf { #define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38) #define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) +#define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; @@ -464,6 +470,8 @@ struct i40e_vsi { #define I40E_VSI_FLAG_VEB_OWNER BIT(1) unsigned long flags; + /* Per VSI lock to protect elements/list (MAC filter) */ + spinlock_t mac_filter_list_lock; struct list_head mac_filter_list; /* VSI stats */ @@ -494,6 +502,7 @@ struct i40e_vsi { */ u16 rx_itr_setting; u16 tx_itr_setting; + u16 int_rate_limit; /* value in usecs */ u16 rss_table_size; u16 rss_size; @@ -570,6 +579,8 @@ struct i40e_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; bool arm_wb_state; +#define ITR_COUNTDOWN_START 100 + u8 itr_countdown; /* when 0 should adjust ITR */ } ____cacheline_internodealigned_in_smp; /* lan device */ @@ -579,22 +590,29 @@ struct i40e_device { }; /** - * i40e_fw_version_str - format the FW and NVM version strings + * i40e_nvm_version_str - format the NVM version strings * @hw: ptr to the hardware info **/ -static inline char *i40e_fw_version_str(struct i40e_hw *hw) +static inline char *i40e_nvm_version_str(struct i40e_hw *hw) { static char buf[32]; + u32 full_ver; + u8 ver, patch; + u16 build; + + full_ver = hw->nvm.oem_ver; + ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT); + build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) + & I40E_OEM_VER_BUILD_MASK); + patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK); snprintf(buf, sizeof(buf), - "f%d.%d.%05d a%d.%d n%x.%02x e%x", - hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, - hw->aq.api_maj_ver, hw->aq.api_min_ver, + "%x.%02x 0x%x %d.%d.%d", (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT, (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT, - (hw->nvm.eetrack & 0xffffff)); + hw->nvm.eetrack, ver, build, patch); return buf; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index fa2e916b23da..0ff8f01e57ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! 
*/ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -553,8 +553,9 @@ shutdown_arq_out: **/ i40e_status i40e_init_adminq(struct i40e_hw *hw) { - i40e_status ret_code; + u16 cfg_ptr, oem_hi, oem_lo; u16 eetrack_lo, eetrack_hi; + i40e_status ret_code; int retry = 0; /* verify input for valid configuration */ @@ -613,6 +614,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; + i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); + i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), + &oem_hi); + i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), + &oem_lo); + hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 785b3dbd22ca..6584b6cd73fd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1722,11 +1722,13 @@ struct i40e_aqc_get_link_status { u8 phy_type; /* i40e_aq_phy_type */ u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; -#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_UP 0x01 /* obsolete */ +#define I40E_AQ_LINK_UP_FUNCTION 0x01 #define I40E_AQ_LINK_FAULT 0x02 #define I40E_AQ_LINK_FAULT_TX 0x04 #define I40E_AQ_LINK_FAULT_RX 0x08 #define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 #define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; @@ -2130,6 +2132,13 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); struct i40e_aqc_lldp_set_local_mib { #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 #define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; __le16 length; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 2d012d9c22ad..2d74c6e4d7b6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -87,7 +87,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert **/ -char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) { switch (aq_err) { case I40E_AQ_RC_OK: @@ -147,7 +147,7 @@ char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ -char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) +const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) { switch (stat_err) { case 0: @@ -331,25 +331,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, - "\t0x%04X %02X %02X %02X %02X %02X 
%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - i, buf[i], buf[i + 1], buf[i + 2], - buf[i + 3], buf[i + 4], buf[i + 5], - buf[i + 6], buf[i + 7], buf[i + 8], - buf[i + 9], buf[i + 10], buf[i + 11], - buf[i + 12], buf[i + 13], buf[i + 14], - buf[i + 15]); + i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); /* write whatever's left over without overrunning the buffer */ - if (i < len) { - char d_buf[80]; - int j = 0; - - memset(d_buf, 0, sizeof(d_buf)); - j += sprintf(d_buf, "\t0x%04X ", i); - while (i < len) - j += sprintf(&d_buf[j], " %02X", buf[i++]); - i40e_debug(hw, mask, "%s\n", d_buf); - } + if (i < len) + i40e_debug(hw, mask, "\t0x%04X %*ph\n", + i, len - i, buf + i); } } @@ -958,6 +944,9 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) else hw->pf_id = (u8)(func_rid & 0x7); + if (hw->mac.type == I40E_MAC_X722) + hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE; + status = i40e_init_nvm(hw); return status; } @@ -1617,6 +1606,9 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) status = I40E_ERR_UNKNOWN_PHY; + if (report_init) + hw->phy.phy_types = le32_to_cpu(abilities->phy_type); + return status; } @@ -1717,14 +1709,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; } /* Update the link info */ - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ msleep(1000); - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; @@ -2247,7 +2239,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) i40e_status status = 0; if (hw->phy.get_link_info) { - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); if (status) i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", @@ -2260,6 +2252,32 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) } /** + * i40e_updatelink_status - update status of the HW network link + * @hw: pointer to the hw struct + **/ +i40e_status i40e_update_link_info(struct i40e_hw *hw) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + i40e_status status = 0; + + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + if (status) + return status; + + if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) { + status = i40e_aq_get_phy_capabilities(hw, false, false, + &abilities, NULL); + if (status) + return status; + + memcpy(hw->phy.link_info.module_type, &abilities.module_type, + sizeof(hw->phy.link_info.module_type)); + } + + return status; +} + +/** * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC * @hw: pointer to the hw struct * @uplink_seid: the MAC or other gizmo SEID @@ -3797,6 +3815,28 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } /** + * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control + * @hw: pointer to the hw struct + * @seid: VSI seid to add ethertype filter from + **/ +#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 seid) +{ + u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; + u16 ethtype = 
I40E_FLOW_CONTROL_ETHTYPE; + i40e_status status; + + status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, + seid, 0, true, NULL, + NULL); + if (status) + hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); +} + +/** * i40e_aq_alternate_read * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 6fa07ef1651d..2691277c0055 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -380,7 +380,7 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, { u16 length, typelength, offset = 0; struct i40e_cee_app_prio *app; - u8 i, up; + u8 i, up, selector; typelength = ntohs(tlv->hdr.typelen); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> @@ -393,13 +393,21 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, for (i = 0; i < dcbcfg->numapps; i++) { app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset); for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) { - if (app->prio_map & (1 << up)) + if (app->prio_map & BIT(up)) break; } dcbcfg->app[i].priority = up; - /* Get Selector from lower 2 bits */ - dcbcfg->app[i].selector = (app->upper_oui_sel & - I40E_CEE_APP_SELECTOR_MASK); + + /* Get Selector from lower 2 bits, and convert to IEEE */ + selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK); + if (selector == I40E_CEE_APP_SEL_ETHTYPE) + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + else if (selector == I40E_CEE_APP_SEL_TCPIP) + dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; + else + /* Keep selector as it is for unknown types */ + dcbcfg->app[i].selector = selector; + dcbcfg->app[i].protocolid = ntohs(app->protocol); /* Move to next app */ offset += sizeof(*app); diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 7c42d1340de6..886e667f2f1c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -240,10 +240,9 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf, for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && pf->vsi[v]->netdev) { err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); - if (err) - dev_info(&pf->pdev->dev, "Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", - pf->vsi[v]->seid, err, app->selector, - app->protocolid, app->priority); + dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", + pf->vsi[v]->seid, err, app->selector, + app->protocolid, app->priority); } } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index c1dd2483e262..d4b7af9a2fc8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -953,24 +953,6 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) } } -/** - * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR - * @pf: the PF that would be altered - * @flag: flag that needs enabling or disabling - * @enable: Enable/disable FD SD/ATR - **/ -static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) -{ - if (enable) { - pf->flags |= flag; - } else { - pf->flags &= ~flag; - pf->auto_disable_flags |= flag; - } - dev_info(&pf->pdev->dev, "requesting a PF reset\n"); - i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED)); -} - #define I40E_MAX_DEBUG_OUT_BUFFER 
(4096*4) /** * i40e_dbg_command_write - write into command datum @@ -1155,7 +1137,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, ma, vlan, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); ret = i40e_sync_vsi_filters(vsi, true); if (f && !ret) dev_info(&pf->pdev->dev, @@ -1192,7 +1176,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, ma, vlan, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); ret = i40e_sync_vsi_filters(vsi, true); if (!ret) dev_info(&pf->pdev->dev, @@ -1759,10 +1745,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, raw_packet = NULL; kfree(asc_packet); asc_packet = NULL; - } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { - i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false); - } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) { - i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true); } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) { dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n", i40e_get_current_fd_count(pf)); @@ -1989,8 +1971,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n"); dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); - dev_info(&pf->pdev->dev, " fd-atr off\n"); - dev_info(&pf->pdev->dev, " fd-atr on\n"); dev_info(&pf->pdev->dev, " fd current cnt"); dev_info(&pf->pdev->dev, " lldp start\n"); dev_info(&pf->pdev->dev, " lldp stop\n"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h new file mode 100644 index 000000000000..c601ca4a610c --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -0,0 +1,55 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DEVIDS_H_ +#define _I40E_DEVIDS_H_ + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_A 0x157F +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_20G_KR2_A 0x1588 +#define I40E_DEV_ID_10G_BASE_T4 0x1589 +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_X722_VF 0x37CD +#define I40E_DEV_ID_X722_VF_HV 0x37D9 + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +#endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 148f61461076..3f385ffe420f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -90,9 +90,6 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("tx_linearize", tx_linearize), }; -static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, - struct ethtool_rxnfc *cmd); - /* These PF_STATs might look like duplicates of some NETDEV_STATs, * but they are separate. This device supports Virtualization, and * as such might have several netdevs supporting VMDq and FCoE going @@ -231,6 +228,8 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "NPAR", "LinkPolling", + "flow-director-atr", + "veb-stats", }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings) @@ -254,7 +253,8 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) **/ static void i40e_get_settings_link_up(struct i40e_hw *hw, struct ethtool_cmd *ecmd, - struct net_device *netdev) + struct net_device *netdev, + struct i40e_pf *pf) { struct i40e_link_status *hw_link_info = &hw->phy.link_info; u32 link_speed = hw_link_info->link_speed; @@ -273,65 +273,49 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, case I40E_PHY_TYPE_40GBASE_AOC: ecmd->supported = SUPPORTED_40000baseCR4_Full; break; - case I40E_PHY_TYPE_40GBASE_KR4: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_40000baseKR4_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_40000baseKR4_Full; - break; case I40E_PHY_TYPE_40GBASE_SR4: ecmd->supported = SUPPORTED_40000baseSR4_Full; break; case I40E_PHY_TYPE_40GBASE_LR4: ecmd->supported = SUPPORTED_40000baseLR4_Full; break; - case I40E_PHY_TYPE_20GBASE_KR2: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_20000baseKR2_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_20000baseKR2_Full; - break; - case I40E_PHY_TYPE_10GBASE_KX4: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseKX4_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseKX4_Full; - break; - case I40E_PHY_TYPE_10GBASE_KR: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseKR_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseKR_Full; - break; case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case 
I40E_PHY_TYPE_1000BASE_LX: - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; + ecmd->supported = SUPPORTED_10000baseT_Full; + if (hw_link_info->module_type[2] & + I40E_MODULE_TYPE_1000BASE_SX || + hw_link_info->module_type[2] & + I40E_MODULE_TYPE_1000BASE_LX) { + ecmd->supported |= SUPPORTED_1000baseT_Full; + if (hw_link_info->requested_speeds & + I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - break; - case I40E_PHY_TYPE_1000BASE_KX: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseKX_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseKX_Full; break; case I40E_PHY_TYPE_10GBASE_T: case I40E_PHY_TYPE_1000BASE_T: - case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; + SUPPORTED_1000baseT_Full; ecmd->advertising = ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ecmd->advertising |= ADVERTISED_10000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_1000BASE_T_OPTICAL: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_100BASE_TX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) ecmd->advertising |= ADVERTISED_100baseT_Full; break; @@ -351,12 +335,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, break; case I40E_PHY_TYPE_SGMII: ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; + SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) - ecmd->advertising |= ADVERTISED_100baseT_Full; + if (pf->hw.mac.type == I40E_MAC_X722) { + ecmd->supported |= SUPPORTED_100baseT_Full; + if (hw_link_info->requested_speeds & + I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + } + break; + /* Backplane is set based on supported phy types in get_settings + * so don't set anything here but don't warn either + */ + case I40E_PHY_TYPE_40GBASE_KR4: + case I40E_PHY_TYPE_20GBASE_KR2: + case I40E_PHY_TYPE_10GBASE_KR: + case I40E_PHY_TYPE_10GBASE_KX4: + case I40E_PHY_TYPE_1000BASE_KX: break; default: /* if we got here and link is up something bad is afoot */ @@ -395,66 +391,73 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, * Reports link settings that can be determined when link is down **/ static void i40e_get_settings_link_down(struct i40e_hw *hw, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd, + struct i40e_pf *pf) { - struct i40e_link_status *hw_link_info = &hw->phy.link_info; + enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types; /* link is down and the driver needs to fall back on - * device ID to determine what kinds of info to display, - * it's mostly a guess that may change when link is up + * supported phy types to figure out what info to display */ - switch (hw->device_id) { - case I40E_DEV_ID_QSFP_A: - case I40E_DEV_ID_QSFP_B: - case 
I40E_DEV_ID_QSFP_C: - /* pluggable QSFP */ - ecmd->supported = SUPPORTED_40000baseSR4_Full | - SUPPORTED_40000baseCR4_Full | - SUPPORTED_40000baseLR4_Full; - ecmd->advertising = ADVERTISED_40000baseSR4_Full | - ADVERTISED_40000baseCR4_Full | - ADVERTISED_40000baseLR4_Full; - break; - case I40E_DEV_ID_KX_B: - /* backplane 40G */ - ecmd->supported = SUPPORTED_40000baseKR4_Full; - ecmd->advertising = ADVERTISED_40000baseKR4_Full; - break; - case I40E_DEV_ID_KX_C: - /* backplane 10G */ - ecmd->supported = SUPPORTED_10000baseKR_Full; - ecmd->advertising = ADVERTISED_10000baseKR_Full; - break; - case I40E_DEV_ID_10G_BASE_T: - case I40E_DEV_ID_10G_BASE_T4: - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - /* Figure out what has been requested */ - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->supported = 0x0; + ecmd->advertising = 0x0; + if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + if (pf->hw.mac.type == I40E_MAC_X722) { + ecmd->supported |= SUPPORTED_100baseT_Full; ecmd->advertising |= ADVERTISED_100baseT_Full; - break; - case I40E_DEV_ID_20G_KR2: - case I40E_DEV_ID_20G_KR2_A: - /* backplane 20G */ - ecmd->supported = SUPPORTED_20000baseKR2_Full; - ecmd->advertising = ADVERTISED_20000baseKR2_Full; - break; - default: - /* all the rest are 10G/1G */ - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; - /* Figure out what has been requested */ - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - break; + } } + if (phy_types & I40E_CAP_PHY_TYPE_XAUI || + phy_types & I40E_CAP_PHY_TYPE_XFI || + phy_types & I40E_CAP_PHY_TYPE_SFI || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) + ecmd->supported |= SUPPORTED_10000baseT_Full; + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || + phy_types & I40E_CAP_PHY_TYPE_XLPPI || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC) + ecmd->supported |= SUPPORTED_40000baseCR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_40000baseCR4_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_40000baseCR4_Full; + } + if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && + !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_100baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_100baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || + phy_types & 
I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) + ecmd->supported |= SUPPORTED_40000baseSR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) + ecmd->supported |= SUPPORTED_40000baseLR4_Full; /* With no link speed and duplex are unknown */ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); @@ -478,12 +481,43 @@ static int i40e_get_settings(struct net_device *netdev, bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; if (link_up) - i40e_get_settings_link_up(hw, ecmd, netdev); + i40e_get_settings_link_up(hw, ecmd, netdev, pf); else - i40e_get_settings_link_down(hw, ecmd); + i40e_get_settings_link_down(hw, ecmd, pf); /* Now set the settings that don't rely on link being up/down */ + /* For backplane, supported and advertised are only reliant on the + * phy types the NVM specifies are supported. + */ + if (hw->device_id == I40E_DEV_ID_KX_B || + hw->device_id == I40E_DEV_ID_KX_C || + hw->device_id == I40E_DEV_ID_20G_KR2 || + hw->device_id == I40E_DEV_ID_20G_KR2_A) { + ecmd->supported = SUPPORTED_Autoneg; + ecmd->advertising = ADVERTISED_Autoneg; + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) { + ecmd->supported |= SUPPORTED_40000baseKR4_Full; + ecmd->advertising |= ADVERTISED_40000baseKR4_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { + ecmd->supported |= SUPPORTED_20000baseKR2_Full; + ecmd->advertising |= ADVERTISED_20000baseKR2_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { + ecmd->supported |= SUPPORTED_10000baseKR_Full; + ecmd->advertising |= ADVERTISED_10000baseKR_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { + ecmd->supported |= SUPPORTED_10000baseKX4_Full; + ecmd->advertising |= ADVERTISED_10000baseKX4_Full; + } + if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { + ecmd->supported |= SUPPORTED_1000baseKX_Full; + ecmd->advertising |= ADVERTISED_1000baseKX_Full; + } + } + /* Set autoneg settings */ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); @@ -583,6 +617,14 @@ static int i40e_set_settings(struct net_device *netdev, hw->phy.link_info.link_info & I40E_AQ_LINK_UP) return -EOPNOTSUPP; + if (hw->device_id == I40E_DEV_ID_KX_B || + hw->device_id == I40E_DEV_ID_KX_C || + hw->device_id == I40E_DEV_ID_20G_KR2 || + hw->device_id == I40E_DEV_ID_20G_KR2_A) { + netdev_info(netdev, "Changing settings is not supported on backplane.\n"); + return -EOPNOTSUPP; + } + /* get our own copy of the bits to check against */ memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd)); i40e_get_settings(netdev, &safe_ecmd); @@ -619,28 +661,31 @@ static int i40e_set_settings(struct net_device *netdev, /* Check autoneg */ if (autoneg == AUTONEG_ENABLE) { - /* If autoneg is not supported, return error */ - if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { - netdev_info(netdev, "Autoneg not supported on this phy\n"); - return -EINVAL; - } /* If autoneg was not already enabled */ if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { + /* If autoneg is not supported, return error */ + if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { + netdev_info(netdev, "Autoneg not supported on this phy\n"); + return -EINVAL; + } + /* Autoneg is allowed to change */ config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_AN; change = true; } } else { - /* If autoneg is supported 10GBASE_T is the only phy that - * can disable it, so otherwise return error - */ - if (safe_ecmd.supported & SUPPORTED_Autoneg && - hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) { - netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); - return -EINVAL; - } /* If autoneg is currently enabled */ if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { + /* If autoneg is supported 10GBASE_T is the only PHY + * that can disable it, so otherwise return error + */ + if (safe_ecmd.supported & SUPPORTED_Autoneg && + hw->phy.link_info.phy_type != + I40E_PHY_TYPE_10GBASE_T) { + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); + return -EINVAL; + } + /* Autoneg is allowed to change */ config.abilities = abilities.abilities & ~I40E_AQ_PHY_ENABLE_AN; change = true; @@ -704,11 +749,11 @@ static int i40e_set_settings(struct net_device *netdev, return -EAGAIN; } - status = i40e_aq_get_link_info(hw, true, NULL, NULL); + status = i40e_update_link_info(hw); if (status) - netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n", - i40e_stat_str(hw, status), - i40e_aq_str(hw, hw->aq.asq_last_status)); + netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); } else { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); @@ -958,9 +1003,7 @@ static int i40e_get_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && - ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) || - (hw->debug_mask & I40E_DEBUG_NVM))) + if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -1064,10 +1107,7 @@ static int i40e_set_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && - ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM && - hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) || - (hw->debug_mask & I40E_DEBUG_NVM))) + if (ret_val 
&& (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -1087,11 +1127,10 @@ static void i40e_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, i40e_driver_version_str, sizeof(drvinfo->version)); - strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw), + strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw), sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; } static void i40e_get_ringparam(struct net_device *netdev, @@ -1367,6 +1406,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } + for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) { + data[i++] = veb->tc_stats.tc_tx_packets[j]; + data[i++] = veb->tc_stats.tc_tx_bytes[j]; + data[i++] = veb->tc_stats.tc_rx_packets[j]; + data[i++] = veb->tc_stats.tc_rx_bytes[j]; + } } for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { p = (char *)pf + i40e_gstrings_stats[j].stat_offset; @@ -1815,6 +1860,14 @@ static int i40e_get_coalesce(struct net_device *netdev, ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC; ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC; + /* we use the _usecs_high to store/set the interrupt rate limit + * that the hardware supports, that almost but not quite + * fits the original intent of the ethtool variable, + * the rx_coalesce_usecs_high limits total interrupts + * per second from both tx/rx sources. + */ + ec->rx_coalesce_usecs_high = vsi->int_rate_limit; + ec->tx_coalesce_usecs_high = vsi->int_rate_limit; return 0; } @@ -1833,6 +1886,17 @@ static int i40e_set_coalesce(struct net_device *netdev, if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) vsi->work_limit = ec->tx_max_coalesced_frames_irq; + /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */ + if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) { + netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n"); + return -EINVAL; + } + + if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n"); + return -EINVAL; + } + vector = vsi->base_vector; if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { @@ -1846,6 +1910,8 @@ static int i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } + vsi->int_rate_limit = ec->rx_coalesce_usecs_high; + if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { vsi->tx_itr_setting = ec->tx_coalesce_usecs; @@ -1870,11 +1936,14 @@ static int i40e_set_coalesce(struct net_device *netdev, vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); + q_vector = vsi->q_vectors[i]; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr); + wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl); i40e_flush(hw); } @@ -2639,6 +2708,10 @@ static u32 i40e_get_priv_flags(struct net_device *dev) 
I40E_PRIV_FLAGS_NPAR_FLAG : 0; ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ? I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0; + ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ? + I40E_PRIV_FLAGS_FD_ATR : 0; + ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? + I40E_PRIV_FLAGS_VEB_STATS : 0; return ret_flags; } @@ -2659,6 +2732,22 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) else pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED; + /* allow the user to control the state of the Flow + * Director ATR (Application Targeted Routing) feature + * of the driver + */ + if (flags & I40E_PRIV_FLAGS_FD_ATR) { + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + } else { + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; + } + + if (flags & I40E_PRIV_FLAGS_VEB_STATS) + pf->flags |= I40E_FLAG_VEB_STATS_ENABLED; + else + pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index eaedc1f4c257..fe5d9bf3ed6d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -284,7 +284,7 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf) pf->fcoe_hmc_filt_num = 0; if (!pf->hw.func_caps.fcoe) { - dev_info(&pf->pdev->dev, "FCoE capability is disabled\n"); + dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n"); return; } @@ -1516,10 +1516,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) * same PCI function. */ netdev->dev_port = 1; + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false); i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false); i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false); i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); /* use san mac */ ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a484f2265524..b825f978d441 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 21 +#define DRV_VERSION_BUILD 46 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -1355,6 +1355,9 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL when no memory available. + * + * NOTE: This function is expected to be called with mac_filter_list_lock + * being held. **/ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -1413,6 +1416,9 @@ add_filter_out: * @vlan: the vlan * @is_vf: make sure it's a VF filter, else doesn't matter * @is_netdev: make sure it's a netdev filter, else doesn't matter + * + * NOTE: This function is expected to be called with mac_filter_list_lock + * being held. 
**/ void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -1519,8 +1525,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p) element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } else { + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); } if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { @@ -1531,10 +1539,12 @@ static int i40e_set_mac(struct net_device *netdev, void *p) element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } else { + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false); if (f) f->is_laa = true; + spin_unlock_bh(&vsi->mac_filter_list_lock); } i40e_sync_vsi_filters(vsi, false); @@ -1707,6 +1717,8 @@ static void i40e_set_rx_mode(struct net_device *netdev) struct netdev_hw_addr *mca; struct netdev_hw_addr *ha; + spin_lock_bh(&vsi->mac_filter_list_lock); + /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { if (!i40e_find_mac(vsi, uca->addr, false, true)) { @@ -1754,6 +1766,7 @@ static void i40e_set_rx_mode(struct net_device *netdev) bottom_of_search_loop: continue; } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* check for other flag changes */ if (vsi->current_netdev_flags != vsi->netdev->flags) { @@ -1763,6 +1776,79 @@ bottom_of_search_loop: } /** + * i40e_mac_filter_entry_clone - Clones a MAC filter entry + * @src: source MAC filter entry to be clones + * + * Returns the pointer to newly cloned MAC filter entry or NULL + * in case of error + **/ +static struct i40e_mac_filter *i40e_mac_filter_entry_clone( + struct i40e_mac_filter *src) +{ + struct i40e_mac_filter *f; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + *f = *src; + + INIT_LIST_HEAD(&f->list); + + return f; +} + +/** + * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries + * @vsi: pointer to vsi struct + * @from: Pointer to list which contains MAC filter entries - changes to + * those entries needs to be undone. + * + * MAC filter entries from list were slated to be removed from device. + **/ +static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, + struct list_head *from) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, from, list) { + f->changed = true; + /* Move the element back into MAC filter list*/ + list_move_tail(&f->list, &vsi->mac_filter_list); + } +} + +/** + * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries + * @vsi: pointer to vsi struct + * + * MAC filter entries from list were slated to be added from device. 
+ **/ +static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed && f->counter) + f->changed = true; + } +} + +/** + * i40e_cleanup_add_list - Deletes the element from add list and release + * memory + * @add_list: Pointer to list which contains MAC filter entries + **/ +static void i40e_cleanup_add_list(struct list_head *add_list) +{ + struct i40e_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, add_list, list) { + list_del(&f->list); + kfree(f); + } +} + +/** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI * @grab_rtnl: whether RTNL needs to be grabbed @@ -1773,11 +1859,13 @@ bottom_of_search_loop: **/ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) { - struct i40e_mac_filter *f, *ftmp; + struct list_head tmp_del_list, tmp_add_list; + struct i40e_mac_filter *f, *ftmp, *fclone; bool promisc_forced_on = false; bool add_happened = false; int filter_list_len = 0; u32 changed_flags = 0; + bool err_cond = false; i40e_status ret = 0; struct i40e_pf *pf; int num_add = 0; @@ -1798,17 +1886,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) vsi->current_netdev_flags = vsi->netdev->flags; } + INIT_LIST_HEAD(&tmp_del_list); + INIT_LIST_HEAD(&tmp_add_list); + if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; - filter_list_len = pf->hw.aq.asq_buf_size / - sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kcalloc(filter_list_len, - sizeof(struct i40e_aqc_remove_macvlan_element_data), - GFP_KERNEL); - if (!del_list) - return -ENOMEM; - + spin_lock_bh(&vsi->mac_filter_list_lock); list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (!f->changed) continue; @@ -1816,6 +1900,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) if (f->counter != 0) continue; f->changed = false; + + /* Move the element into temporary del_list */ + list_move_tail(&f->list, &tmp_del_list); + } + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed) + continue; + + if (f->counter == 0) + continue; + f->changed = false; + + /* Clone MAC filter entry and add into temporary list */ + fclone = i40e_mac_filter_entry_clone(f); + if (!fclone) { + err_cond = true; + break; + } + list_add_tail(&fclone->list, &tmp_add_list); + } + + /* if failed to clone MAC filter entry - undo */ + if (err_cond) { + i40e_undo_del_filter_entries(vsi, &tmp_del_list); + i40e_undo_add_filter_entries(vsi); + } + spin_unlock_bh(&vsi->mac_filter_list_lock); + + if (err_cond) + i40e_cleanup_add_list(&tmp_add_list); + } + + /* Now process 'del_list' outside the lock */ + if (!list_empty(&tmp_del_list)) { + filter_list_len = pf->hw.aq.asq_buf_size / + sizeof(struct i40e_aqc_remove_macvlan_element_data); + del_list = kcalloc(filter_list_len, + sizeof(struct i40e_aqc_remove_macvlan_element_data), + GFP_KERNEL); + if (!del_list) { + i40e_cleanup_add_list(&tmp_add_list); + + /* Undo VSI's MAC filter entry element updates */ + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_undo_del_filter_entries(vsi, &tmp_del_list); + i40e_undo_add_filter_entries(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + return -ENOMEM; + } + + list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { cmd_flags = 0; /* add to delete list */ @@ -1828,10 +1964,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) del_list[num_del].flags = 
cmd_flags; num_del++; - /* unlink from filter list */ - list_del(&f->list); - kfree(f); - /* flush a full buffer */ if (num_del == filter_list_len) { ret = i40e_aq_remove_macvlan(&pf->hw, @@ -1842,12 +1974,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) memset(del_list, 0, sizeof(*del_list)); if (ret && aq_err != I40E_AQ_RC_ENOENT) - dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, aq_err)); + dev_err(&pf->pdev->dev, + "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); } + /* Release memory for MAC filter entries which were + * synced up with HW. + */ + list_del(&f->list); + kfree(f); } + if (num_del) { ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, del_list, num_del, NULL); @@ -1863,6 +2001,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) kfree(del_list); del_list = NULL; + } + + if (!list_empty(&tmp_add_list)) { /* do all the adds now */ filter_list_len = pf->hw.aq.asq_buf_size / @@ -1870,16 +2011,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) add_list = kcalloc(filter_list_len, sizeof(struct i40e_aqc_add_macvlan_element_data), GFP_KERNEL); - if (!add_list) + if (!add_list) { + /* Purge element from temporary lists */ + i40e_cleanup_add_list(&tmp_add_list); + + /* Undo add filter entries from VSI MAC filter list */ + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_undo_add_filter_entries(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; + } - list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - if (!f->changed) - continue; + list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { - if (f->counter == 0) - continue; - f->changed = false; add_happened = true; cmd_flags = 0; @@ -1906,7 +2050,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) break; memset(add_list, 0, sizeof(*add_list)); } + /* Entries from tmp_add_list were cloned from MAC + * filter list, hence clean those cloned entries + */ + list_del(&f->list); + kfree(f); } + if (num_add) { ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, add_list, num_add, NULL); @@ -2158,6 +2308,9 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(vsi->netdev); + /* Locked once because all functions invoked below iterates list*/ + spin_lock_bh(&vsi->mac_filter_list_lock); + if (is_netdev) { add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, is_vf, is_netdev); @@ -2165,6 +2318,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, vsi->netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2175,6 +2329,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2196,6 +2351,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add filter 0 for %pM\n", vsi->netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2204,22 +2360,28 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ if (vid > 0 && !vsi->info.pvid) { list_for_each_entry(f, 
&vsi->mac_filter_list, list) { - if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev)) { - i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev); - add_f = i40e_add_filter(vsi, f->macaddr, - 0, is_vf, is_netdev); - if (!add_f) { - dev_info(&vsi->back->pdev->dev, - "Could not add filter 0 for %pM\n", - f->macaddr); - return -ENOMEM; - } + if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev)) + continue; + i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev); + add_f = i40e_add_filter(vsi, f->macaddr, + 0, is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter 0 for %pM\n", + f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); + return -ENOMEM; } } } + /* Make sure to release before sync_vsi_filter because that + * function will lock/unlock as necessary + */ + spin_unlock_bh(&vsi->mac_filter_list_lock); + if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return 0; @@ -2244,6 +2406,9 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(netdev); + /* Locked once because all functions invoked below iterates list */ + spin_lock_bh(&vsi->mac_filter_list_lock); + if (is_netdev) i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); @@ -2274,6 +2439,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) dev_info(&vsi->back->pdev->dev, "Could not add filter %d for %pM\n", I40E_VLAN_ANY, netdev->dev_addr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } @@ -2282,16 +2448,22 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) list_for_each_entry(f, &vsi->mac_filter_list, list) { i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, - is_vf, is_netdev); + is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add filter %d for %pM\n", I40E_VLAN_ANY, f->macaddr); + spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } } + /* Make sure to release before sync_vsi_filter because that + * function with lock/unlock as necessary + */ + spin_unlock_bh(&vsi->mac_filter_list_lock); + if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return 0; @@ -2901,11 +3073,9 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi) static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - struct i40e_q_vector *q_vector; struct i40e_hw *hw = &pf->hw; u16 vector; int i, q; - u32 val; u32 qp; /* The interrupt indexing is offset by 1 in the PFINT_ITRn @@ -2915,7 +3085,9 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) qp = vsi->base_queue; vector = vsi->base_vector; for (i = 0; i < vsi->num_q_vectors; i++, vector++) { - q_vector = vsi->q_vectors[i]; + struct i40e_q_vector *q_vector = vsi->q_vectors[i]; + + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), @@ -2924,10 +3096,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) q_vector->tx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); + wr32(hw, I40E_PFINT_RATEN(vector - 1), + INTRL_USEC_TO_REG(vsi->int_rate_limit)); /* Linked list for the queuepairs assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), 
qp); for (q = 0; q < q_vector->num_ringpairs; q++) { + u32 val; + val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | @@ -3007,6 +3183,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) u32 val; /* set the ITR configuration */ + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); @@ -3092,7 +3269,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data) if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -3261,6 +3438,8 @@ static irqreturn_t i40e_intr(int irq, void *data) /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_q_vector *q_vector = vsi->q_vectors[0]; /* temporarily disable queue cause for NAPI processing */ u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); @@ -3273,7 +3452,7 @@ static irqreturn_t i40e_intr(int irq, void *data) wr32(hw, I40E_QINT_TQCTL(0), qval); if (!test_bit(__I40E_DOWN, &pf->state)) - napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); + napi_schedule_irqoff(&q_vector->napi); } if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { @@ -3574,14 +3753,12 @@ static void i40e_netpoll(struct net_device *netdev) if (test_bit(__I40E_DOWN, &vsi->state)) return; - pf->flags |= I40E_FLAG_IN_NETPOLL; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_msix_clean_rings(0, vsi->q_vectors[i]); } else { i40e_intr(pf->pdev->irq, netdev); } - pf->flags &= ~I40E_FLAG_IN_NETPOLL; } #endif @@ -4844,8 +5021,8 @@ out: */ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) { - char speed[SPEED_SIZE] = "Unknown"; - char fc[FC_SIZE] = "RX/TX"; + char *speed = "Unknown"; + char *fc = "Unknown"; if (vsi->current_isup == isup) return; @@ -4866,19 +5043,19 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) switch (vsi->back->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: - strlcpy(speed, "40 Gbps", SPEED_SIZE); + speed = "40 G"; break; case I40E_LINK_SPEED_20GB: - strncpy(speed, "20 Gbps", SPEED_SIZE); + speed = "20 G"; break; case I40E_LINK_SPEED_10GB: - strlcpy(speed, "10 Gbps", SPEED_SIZE); + speed = "10 G"; break; case I40E_LINK_SPEED_1GB: - strlcpy(speed, "1000 Mbps", SPEED_SIZE); + speed = "1000 M"; break; case I40E_LINK_SPEED_100MB: - strncpy(speed, "100 Mbps", SPEED_SIZE); + speed = "100 M"; break; default: break; @@ -4886,20 +5063,20 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) switch (vsi->back->hw.fc.current_mode) { case I40E_FC_FULL: - strlcpy(fc, "RX/TX", FC_SIZE); + fc = "RX/TX"; break; case I40E_FC_TX_PAUSE: - strlcpy(fc, "TX", FC_SIZE); + fc = "TX"; break; case I40E_FC_RX_PAUSE: - strlcpy(fc, "RX", FC_SIZE); + fc = "RX"; break; default: - strlcpy(fc, "None", FC_SIZE); + fc = "None"; break; } - netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n", + netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n", speed, fc); } @@ -6220,8 +6397,9 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; - dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", - veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + if (pf->hw.debug_mask & I40E_DEBUG_LAN) + dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", + veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); if (veb->bridge_mode & BRIDGE_MODE_VEPA) i40e_disable_pf_switch_lb(pf); else @@ -6353,12 +6531,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf) } } while (err); - if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || - (pf->hw.aq.fw_maj_ver < 2)) { - pf->hw.func_caps.num_msix_vectors++; - pf->hw.func_caps.num_msix_vectors_vf++; - } - if (pf->hw.debug_mask & I40E_DEBUG_USER) dev_info(&pf->pdev->dev, "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", @@ -6593,9 +6765,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) /* make sure our flow control settings are restored */ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); if (ret) - dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only @@ -6665,6 +6837,15 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); + /* Add a filter to drop all Flow control frames from any VSI from being + * transmitted. By doing so we stop a malicious VF from sending out + * PAUSE or PFC frames and potentially controlling traffic for other + * PF/VF VSIs. + * The FW can still send Flow control frames if enabled. + */ + i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, + pf->main_vsi_seid); + /* restart the VSIs that were rebuilt and running before the reset */ i40e_pf_unquiesce_all_vsi(pf); @@ -7047,6 +7228,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) vsi->idx = vsi_idx; vsi->rx_itr_setting = pf->rx_itr_default; vsi->tx_itr_setting = pf->tx_itr_default; + vsi->int_rate_limit = 0; vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? 
pf->rss_table_size : 64; vsi->netdev_registered = false; @@ -7065,6 +7247,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) /* Setup default MSIX irq handler for VSI */ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); + /* Initialize VSI lock */ + spin_lock_init(&vsi->mac_filter_list_lock); pf->vsi[vsi_idx] = vsi; ret = vsi_idx; goto unlock_pf; @@ -7955,12 +8139,12 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { - pf->flags |= I40E_FLAG_FD_SB_ENABLED; - } else { + if (pf->flags & I40E_FLAG_MFP_ENABLED && + pf->hw.num_partitions > 1) dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); - } + else + pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->fdir_pf_filter_count = pf->hw.func_caps.fd_filters_guaranteed; pf->hw.fdir_shared_filter_count = @@ -7970,6 +8154,7 @@ static int i40e_sw_init(struct i40e_pf *pf) if (pf->hw.func_caps.vmdq) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; pf->flags |= I40E_FLAG_VMDQ_ENABLED; + pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } #ifdef I40E_FCOE @@ -8330,13 +8515,15 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, * @seq: RTNL message seq # * @dev: the netdev being configured * @filter_mask: unused + * @nlflags: netlink flags passed in * * Return the mode in which the hardware bridge is operating in * i.e VEB or VEPA. **/ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 filter_mask, int nlflags) + u32 __always_unused filter_mask, + int nlflags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -8365,7 +8552,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, /** * i40e_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff - * @netdev: This physical port's netdev + * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ static netdev_features_t i40e_features_check(struct sk_buff *skb, @@ -8446,6 +8633,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_TSO; netdev->features = NETIF_F_SG | @@ -8453,6 +8641,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_SCTP_CSUM | NETIF_F_HIGHDMA | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | @@ -8478,17 +8667,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) * default a MAC-VLAN filter that accepts any tagged packet * which must be replaced by a normal filter. 
*/ - if (!i40e_rm_default_mac_filter(vsi, mac_addr)) + if (!i40e_rm_default_mac_filter(vsi, mac_addr)) { + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); + spin_unlock_bh(&vsi->mac_filter_list_lock); + } } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", pf->vsi[pf->lan_vsi]->netdev->name); random_ether_addr(mac_addr); + + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); } + + spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); @@ -8544,12 +8742,22 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) return 1; veb = pf->veb[vsi->veb_idx]; + if (!veb) { + dev_info(&pf->pdev->dev, + "There is no veb associated with the bridge\n"); + return -ENOENT; + } + /* Uplink is a bridge in VEPA mode */ - if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA)) + if (veb->bridge_mode & BRIDGE_MODE_VEPA) { return 0; + } else { + /* Uplink is a bridge in VEB mode */ + return 1; + } - /* Uplink is a bridge in VEB mode */ - return 1; + /* VEPA is now default bridge, so return 0 */ + return 0; } /** @@ -8562,10 +8770,13 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) static int i40e_add_vsi(struct i40e_vsi *vsi) { int ret = -ENODEV; - struct i40e_mac_filter *f, *ftmp; + u8 laa_macaddr[ETH_ALEN]; + bool found_laa_mac_filter = false; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; + struct i40e_mac_filter *f, *ftmp; + u8 enabled_tc = 0x1; /* TC0 enabled */ int f_count = 0; @@ -8737,32 +8948,41 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) vsi->id = ctxt.vsi_number; } + spin_lock_bh(&vsi->mac_filter_list_lock); /* If macvlan filters already exist, force them to get loaded */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { f->changed = true; f_count++; + /* Expected to have only one MAC filter entry for LAA in list */ if (f->is_laa && vsi->type == I40E_VSI_MAIN) { - struct i40e_aqc_remove_macvlan_element_data element; + ether_addr_copy(laa_macaddr, f->macaddr); + found_laa_mac_filter = true; + } + } + spin_unlock_bh(&vsi->mac_filter_list_lock); - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, f->macaddr); - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; - ret = i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - if (ret) { - /* some older FW has a different default */ - element.flags |= - I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - } + if (found_laa_mac_filter) { + struct i40e_aqc_remove_macvlan_element_data element; - i40e_aq_mac_address_write(hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, - f->macaddr, NULL); + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, laa_macaddr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + ret = i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + if (ret) { + /* some older FW has a different default */ + element.flags |= + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); } + + i40e_aq_mac_address_write(hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + laa_macaddr, NULL); } + if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; pf->flags |= 
I40E_FLAG_FILTER_SYNC; @@ -8825,9 +9045,12 @@ int i40e_vsi_release(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); } + spin_lock_bh(&vsi->mac_filter_list_lock); list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, f->vlan, f->is_vf, f->is_netdev); + spin_unlock_bh(&vsi->mac_filter_list_lock); + i40e_sync_vsi_filters(vsi, false); i40e_vsi_delete(vsi); @@ -9727,7 +9950,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_config_rss(pf); /* fill in link information and enable LSE reporting */ - i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); + i40e_update_link_info(&pf->hw); i40e_link_event(pf); /* Initialize user-specific link properties */ @@ -9845,8 +10068,14 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) } pf->queues_left = queues_left; + dev_dbg(&pf->pdev->dev, + "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", + pf->hw.func_caps.num_tx_qp, + !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), + pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps, + pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); #ifdef I40E_FCOE - dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); + dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); #endif } @@ -9923,6 +10152,10 @@ static void i40e_print_features(struct i40e_pf *pf) if (pf->flags & I40E_FLAG_FCOE_ENABLED) buf += sprintf(buf, "FCOE "); #endif + if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + buf += sprintf(buf, "VEB "); + else + buf += sprintf(buf, "VEPA "); BUG_ON(buf > (string + INFO_STRING_LEN)); dev_info(&pf->pdev->dev, "%s\n", string); @@ -9948,9 +10181,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static u16 pfs_found; u16 wol_nvm_bits; u16 link_status; - int err = 0; + int err; u32 len; u32 i; + u8 set_fc_aq_fail; err = pci_enable_device_mem(pdev); if (err) @@ -10062,7 +10296,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->hw.fc.requested_mode = I40E_FC_NONE; err = i40e_init_adminq(hw); - dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); + + /* provide nvm, fw, api versions */ + dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, + hw->aq.api_maj_ver, hw->aq.api_min_ver, + i40e_nvm_version_str(hw)); + if (err) { dev_info(&pdev->dev, "The driver for the device stopped because the NVM image is newer than expected. 
You must install the most recent version of the network driver.\n"); @@ -10209,6 +10449,25 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } + + /* Make sure flow control is set according to current settings */ + err = i40e_set_fc(hw, &set_fc_aq_fail, true); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on get_phy_cap\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on set_phy_config\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) + dev_dbg(&pf->pdev->dev, + "Set fc with err %s aq_err %s on get_link_info\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + /* if FDIR VSI was set up, start it now */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { @@ -10299,37 +10558,82 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_fcoe_vsi_setup(pf); #endif - /* Get the negotiated link width and speed from PCI config space */ - pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); +#define PCI_SPEED_SIZE 8 +#define PCI_WIDTH_SIZE 8 + /* Devices on the IOSF bus do not have this information + * and will report PCI Gen 1 x 1 by default so don't bother + * checking them. + */ + if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) { + char speed[PCI_SPEED_SIZE] = "Unknown"; + char width[PCI_WIDTH_SIZE] = "Unknown"; - i40e_set_pci_config_data(hw, link_status); + /* Get the negotiated link width and speed from PCI config + * space + */ + pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, + &link_status); + + i40e_set_pci_config_data(hw, link_status); + + switch (hw->bus.speed) { + case i40e_bus_speed_8000: + strncpy(speed, "8.0", PCI_SPEED_SIZE); break; + case i40e_bus_speed_5000: + strncpy(speed, "5.0", PCI_SPEED_SIZE); break; + case i40e_bus_speed_2500: + strncpy(speed, "2.5", PCI_SPEED_SIZE); break; + default: + break; + } + switch (hw->bus.width) { + case i40e_bus_width_pcie_x8: + strncpy(width, "8", PCI_WIDTH_SIZE); break; + case i40e_bus_width_pcie_x4: + strncpy(width, "4", PCI_WIDTH_SIZE); break; + case i40e_bus_width_pcie_x2: + strncpy(width, "2", PCI_WIDTH_SIZE); break; + case i40e_bus_width_pcie_x1: + strncpy(width, "1", PCI_WIDTH_SIZE); break; + default: + break; + } - dev_info(&pdev->dev, "PCI-Express: %s %s\n", - (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : - hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : - hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : - "Unknown"), - (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : - hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : - hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : - hw->bus.width == i40e_bus_width_pcie_x1 ? 
"Width x1" : - "Unknown")); + dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", + speed, width); - if (hw->bus.width < i40e_bus_width_pcie_x8 || - hw->bus.speed < i40e_bus_speed_8000) { - dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); - dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); + if (hw->bus.width < i40e_bus_width_pcie_x8 || + hw->bus.speed < i40e_bus_speed_8000) { + dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); + dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); + } } /* get the requested speeds from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) - dev_info(&pf->pdev->dev, - "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n", - i40e_stat_str(&pf->hw, err), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.link_info.requested_speeds = abilities.link_speed; + /* get the supported phy types from the fw */ + err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); + if (err) + dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type); + + /* Add a filter to drop all Flow control frames from any VSI from being + * transmitted. By doing so we stop a malicious VF from sending out + * PAUSE or PFC frames and potentially controlling traffic for other + * PF/VF VSIs. + * The FW can still send Flow control frames if enabled. 
+ */ + i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, + pf->main_vsi_seid); + /* print a string summarizing features */ i40e_print_features(pf); @@ -10377,6 +10681,7 @@ err_dma: static void i40e_remove(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_hw *hw = &pf->hw; i40e_status ret_code; int i; @@ -10384,6 +10689,10 @@ static void i40e_remove(struct pci_dev *pdev) i40e_ptp_stop(pf); + /* Disable RSS in hw */ + wr32(hw, I40E_PFQF_HENA(0), 0); + wr32(hw, I40E_PFQF_HENA(1), 0); + /* no more scheduling of any task */ set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 2142e1004a2f..6100cdd9ad13 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -290,9 +290,18 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - if (hw->mac.type == I40E_MAC_X722) - return i40e_read_nvm_word_aq(hw, offset, data); - return i40e_read_nvm_word_srctl(hw, offset, data); + enum i40e_status_code ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_word_aq(hw, offset, data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + } + return ret_code; } /** @@ -397,9 +406,19 @@ read_nvm_buffer_aq_exit: i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { - if (hw->mac.type == I40E_MAC_X722) - return i40e_read_nvm_buffer_aq(hw, offset, words, data); - return i40e_read_nvm_buffer_srctl(hw, offset, words, data); + enum i40e_status_code ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, + data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); + } + return ret_code; } /** @@ -465,7 +484,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { - i40e_status ret_code = 0; + i40e_status ret_code; struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; @@ -545,15 +564,16 @@ i40e_calc_nvm_checksum_exit: **/ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) { - i40e_status ret_code = 0; + i40e_status ret_code; u16 checksum; __le16 le_sum; ret_code = i40e_calc_nvm_checksum(hw, &checksum); - le_sum = cpu_to_le16(checksum); - if (!ret_code) + if (!ret_code) { + le_sum = cpu_to_le16(checksum); ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, 1, &le_sum, true); + } return ret_code; } @@ -632,7 +652,7 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val) return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } -static char *i40e_nvm_update_state_str[] = { +static const char * const i40e_nvm_update_state_str[] = { "I40E_NVMUPD_INVALID", "I40E_NVMUPD_READ_CON", "I40E_NVMUPD_READ_SNT", diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index e51e1567837c..bb9d583e5416 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -58,8 +58,8 @@ 
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); -char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); @@ -259,6 +259,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw); void i40e_clear_hw(struct i40e_hw *hw); void i40e_clear_pxe_mode(struct i40e_hw *hw); i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up); +i40e_status i40e_update_link_info(struct i40e_hw *hw); i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, u32 *max_bw, u32 *min_bw, bool *min_valid, @@ -321,4 +322,6 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, void *buff, u16 *ret_buff_size, u8 *ret_next_table, u32 *ret_next_index, struct i40e_asq_cmd_details *cmd_details); +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 vsi_seid); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 552c84e2e05d..565ca7c835bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -674,8 +674,8 @@ void i40e_ptp_init(struct i40e_pf *pf) struct timespec64 ts; u32 regval; - dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, - netdev->name); + if (pf->hw.debug_mask & I40E_DEBUG_LAN) + dev_info(&pf->pdev->dev, "PHC enabled\n"); pf->flags |= I40E_FLAG_PTP; /* Ensure the clocks are running. */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 01e5ece8046d..635b3ac17877 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -815,6 +815,8 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) * i40e_set_new_dynamic_itr - Find new ITR level * @rc: structure containing ring performance data * + * Returns true if ITR changed, false if not + * * Stores a new ITR value based on packets and byte counts during * the last interrupt. The advantage of per interrupt computation * is faster updates and more accurate ITR for the current traffic @@ -823,21 +825,32 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) * testing data as well as attempting to minimize response time * while increasing bulk throughput. 
**/ -static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; + struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; + int usecs; if (rc->total_packets == 0 || !rc->itr) - return; + return false; /* simple throttlerate management - * 0-10MB/s lowest (100000 ints/s) + * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (8000 ints/s) + * 20-1249MB/s bulk (18000 ints/s) + * > 40000 Rx packets per second (8000 ints/s) + * + * The math works out because the divisor is in 10^(-6) which + * turns the bytes/us input value into MB/s values, but + * make sure to use usecs, as the register values written + * are in 2 usec increments in the ITR registers, and make sure + * to use the smoothed values that the countdown timer gives us. */ - bytes_per_int = rc->total_bytes / rc->itr; + usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) @@ -850,35 +863,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: - if (bytes_per_int <= 20) - new_latency_range = I40E_LOW_LATENCY; - break; + case I40E_ULTRA_LATENCY: default: if (bytes_per_int <= 20) new_latency_range = I40E_LOW_LATENCY; break; } + + /* this is to adjust RX more aggressively when streaming small + * packets. The value of 40000 was picked as it is just beyond + * what the hardware can receive per second if in low latency + * mode. + */ +#define RX_ULTRA_PACKET_RATE 40000 + + if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && + (&qv->rx == rc)) + new_latency_range = I40E_ULTRA_LATENCY; + rc->latency_range = new_latency_range; switch (new_latency_range) { case I40E_LOWEST_LATENCY: - new_itr = I40E_ITR_100K; + new_itr = I40E_ITR_50K; break; case I40E_LOW_LATENCY: new_itr = I40E_ITR_20K; break; case I40E_BULK_LATENCY: + new_itr = I40E_ITR_18K; + break; + case I40E_ULTRA_LATENCY: new_itr = I40E_ITR_8K; break; default: break; } - if (new_itr != rc->itr) - rc->itr = new_itr; - rc->total_bytes = 0; rc->total_packets = 0; + + if (new_itr != rc->itr) { + rc->itr = new_itr; + return true; + } + + return false; } /** @@ -1268,16 +1298,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { struct i40e_q_vector *q_vector = rx_ring->q_vector; - struct i40e_vsi *vsi = rx_ring->vsi; - u64 flags = vsi->back->flags; if (vlan_tag & VLAN_VID_MASK) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - if (flags & I40E_FLAG_IN_NETPOLL) - netif_rx(skb); - else - napi_gro_receive(&q_vector->napi, skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -1752,6 +1777,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) return total_rx_packets; } +static u32 i40e_buildreg_itr(const int type, const u16 itr) +{ + u32 val; + + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); + + return val; +} + +/* a small macro to shorten up some long lines */ +#define INTREG I40E_PFINT_DYN_CTLN + /** * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt * @vsi: the VSI we care about @@ -1762,54 +1802,69 @@ static inline void 
i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { struct i40e_hw *hw = &vsi->back->hw; - u16 old_itr; + bool rx = false, tx = false; + u32 rxval, txval; int vector; - u32 val; vector = (q_vector->v_idx + vsi->base_vector); + + /* avoid dynamic calculation if in countdown mode OR if + * all dynamic is disabled + */ + rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + + if (q_vector->itr_countdown > 0 || + (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && + !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { + goto enable_int; + } + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { - old_itr = q_vector->rx.itr; - i40e_set_new_dynamic_itr(&q_vector->rx); - if (old_itr != q_vector->rx.itr) { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_RX_ITR << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (q_vector->rx.itr << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); - } else { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); - } else { - i40e_irq_dynamic_enable(vsi, q_vector->v_idx); + rx = i40e_set_new_dynamic_itr(&q_vector->rx); + rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { - old_itr = q_vector->tx.itr; - i40e_set_new_dynamic_itr(&q_vector->tx); - if (old_itr != q_vector->tx.itr) { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_TX_ITR << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (q_vector->tx.itr << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); - } else { - val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx + - vsi->base_vector - 1), val); - } else { - i40e_irq_dynamic_enable(vsi, q_vector->v_idx); + tx = i40e_set_new_dynamic_itr(&q_vector->tx); + txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); + } + + if (rx || tx) { + /* get the higher of the two ITR adjustments and + * use the same value for both ITR registers + * when in adaptive mode (Rx and/or Tx) + */ + u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); + + q_vector->tx.itr = q_vector->rx.itr = itr; + txval = i40e_buildreg_itr(I40E_TX_ITR, itr); + tx = true; + rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); + rx = true; } + + /* only need to enable the interrupt once, but need + * to possibly update both ITR values + */ + if (rx) { + /* set the INTENA_MSK_MASK so that this first write + * won't actually enable the interrupt, instead just + * updating the ITR (it's bit 31 PF and VF) + */ + rxval |= BIT(31); + /* don't check _DOWN because interrupt isn't being enabled */ + wr32(hw, INTREG(vector - 1), rxval); + } + +enable_int: + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, INTREG(vector - 1), txval); + + if (q_vector->itr_countdown) + q_vector->itr_countdown--; + else + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } /** @@ -1830,7 +1885,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; - int cleaned; + int work_done = 0; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1846,22 +1901,31 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) ring->arm_wb = false; } + /* Handle case where we are called by netpoll with a budget of 0 */ + 
if (budget <= 0) + goto tx_only; + /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { + int cleaned; + if (ring_is_ps_enabled(ring)) cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); else cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + + work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ clean_complete &= (budget_per_ring != cleaned); } /* If work not completed, return budget and polling will return */ if (!clean_complete) { +tx_only: if (arm_wb) i40e_force_wb(vsi, q_vector); return budget; @@ -1871,7 +1935,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) q_vector->arm_wb_state = false; /* Work is done so exit the polling mode and re-enable the interrupt */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { i40e_update_enable_itr(vsi, q_vector); } else { /* Legacy mode */ @@ -2123,6 +2187,7 @@ out: * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header + * @cd_type_cmd_tso_mss: ptr to u64 object * @cd_tunneling: ptr to context descriptor bits * * Returns 0 if no TSO can happen, 1 if tso is going, or error @@ -2182,6 +2247,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @tx_flags: the collected send information + * @cd_type_cmd_tso_mss: ptr to u64 object * * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen **/ @@ -2224,6 +2290,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set + * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 75cecfa6e338..6779fb771d6a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -32,11 +32,14 @@ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ #define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 +#define I40E_ITR_50K 0x000A #define I40E_ITR_20K 0x0019 +#define I40E_ITR_18K 0x001B #define I40E_ITR_8K 0x003E #define I40E_ITR_4K 0x007A -#define I40E_ITR_RX_DEF I40E_ITR_8K -#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ +#define I40E_ITR_RX_DEF I40E_ITR_20K +#define I40E_ITR_TX_DEF I40E_ITR_20K #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ @@ -44,6 +47,15 @@ #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) +/* 0x40 is the enable bit for interrupt rate limiting, and must be set if + * the value of the rate limit is non-zero + */ +#define INTRL_ENA BIT(6) +#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) +#define INTRL_USEC_TO_REG(set) ((set) ? 
((set) >> 2) | INTRL_ENA : 0) +#define I40E_INTRL_8K 125 /* 8000 ints/sec */ +#define I40E_INTRL_62K 16 /* 62500 ints/sec */ +#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define I40E_QUEUE_END_OF_LIST 0x7FF @@ -286,6 +298,7 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, + I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index d1ec5a4326cf..dd2da356d9a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -33,31 +33,7 @@ #include "i40e_adminq.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" - -/* Device IDs */ -#define I40E_DEV_ID_SFP_XL710 0x1572 -#define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F -#define I40E_DEV_ID_KX_B 0x1580 -#define I40E_DEV_ID_KX_C 0x1581 -#define I40E_DEV_ID_QSFP_A 0x1583 -#define I40E_DEV_ID_QSFP_B 0x1584 -#define I40E_DEV_ID_QSFP_C 0x1585 -#define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_20G_KR2 0x1587 -#define I40E_DEV_ID_20G_KR2_A 0x1588 -#define I40E_DEV_ID_10G_BASE_T4 0x1589 -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 -#define I40E_DEV_ID_SFP_X722 0x37D0 -#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 -#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 - -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) +#include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ #define I40E_MASK(mask, shift) (mask << shift) @@ -191,16 +167,65 @@ struct i40e_link_status { bool crc_enable; u8 pacing; u8 requested_speeds; + u8 module_type[3]; + /* 1st byte: module identifier */ +#define I40E_MODULE_TYPE_SFP 0x03 +#define I40E_MODULE_TYPE_QSFP 0x0D + /* 2nd byte: ethernet compliance codes for 10/40G */ +#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 +#define I40E_MODULE_TYPE_40G_LR4 0x02 +#define I40E_MODULE_TYPE_40G_SR4 0x04 +#define I40E_MODULE_TYPE_40G_CR4 0x08 +#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 +#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 +#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 +#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 + /* 3rd byte: ethernet compliance codes for 1G */ +#define I40E_MODULE_TYPE_1000BASE_SX 0x01 +#define I40E_MODULE_TYPE_1000BASE_LX 0x02 +#define I40E_MODULE_TYPE_1000BASE_CX 0x04 +#define I40E_MODULE_TYPE_1000BASE_T 0x08 +}; + +enum i40e_aq_capabilities_phy_type { + I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII), + I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX), + I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4), + I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR), + I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4), + I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI), + I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI), + I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI), + I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI), + I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI), + I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU), + I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC), + I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC), + I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX), + I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T), + 
I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T), + I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR), + I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR), + I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1), + I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4), + I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4), + I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4), + I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX), + I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX), + I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = + BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL), + I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2) }; struct i40e_phy_info { struct i40e_link_status link_info; struct i40e_link_status link_info_old; - u32 autoneg_advertised; - u32 phy_id; - u32 module_type; bool get_link_info; enum i40e_media_type media_type; + /* all the phy types the NVM is capable of */ + enum i40e_aq_capabilities_phy_type phy_types; }; #define I40E_HW_CAP_MAX_GPIO 30 @@ -289,6 +314,7 @@ struct i40e_nvm_info { bool blank_nvm_mode; /* is NVM empty (no FW present)*/ u16 version; /* NVM package version */ u32 eetrack; /* NVM data version */ + u32 oem_ver; /* OEM version info */ }; /* definitions used in NVM update support */ @@ -417,6 +443,8 @@ struct i40e_fc_info { #define I40E_APP_PROTOID_FIP 0x8914 #define I40E_APP_SEL_ETHTYPE 0x1 #define I40E_APP_SEL_TCPIP 0x2 +#define I40E_CEE_APP_SEL_ETHTYPE 0x0 +#define I40E_CEE_APP_SEL_TCPIP 0x1 /* CEE or IEEE 802.1Qaz ETS Configuration data */ struct i40e_dcb_ets_config { @@ -447,6 +475,8 @@ struct i40e_dcbx_config { u8 dcbx_mode; #define I40E_DCBX_MODE_CEE 0x1 #define I40E_DCBX_MODE_IEEE 0x2 + u8 app_mode; +#define I40E_DCBX_APPS_NON_WILLING 0x1 u32 numapps; u32 tlv_status; /* CEE mode TLV status */ struct i40e_dcb_ets_config etscfg; @@ -514,6 +544,9 @@ struct i40e_hw { struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ +#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) + u64 flags; + /* debug mask */ u32 debug_mask; char err_str[16]; @@ -1035,8 +1068,8 @@ enum i40e_filter_program_desc_fd_status { }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ - BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ @@ -1204,6 +1237,8 @@ struct i40e_hw_port_stats { #define I40E_SR_EMP_MODULE_PTR 0x0F #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 +#define I40E_SR_BOOT_CONFIG_PTR 0x17 +#define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 95d0f8c5d484..ae879826084b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -150,6 +150,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 
0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index ee747dc5d617..44462b40f2d7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -547,6 +547,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) */ if (vf->port_vlan_id) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); + + spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id ? vf->port_vlan_id : -1, true, false); @@ -559,6 +561,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF broadcast filter\n"); + spin_unlock_bh(&vsi->mac_filter_list_lock); } /* program mac filter */ @@ -703,6 +706,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf) */ vf->num_queue_pairs = 0; vf->vf_states = 0; + clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); } /** @@ -841,11 +845,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) complete_reset: /* reallocate VF resources to reset the VSI state */ i40e_free_vf_res(vf); - i40e_alloc_vf_res(vf); - i40e_enable_vf_mappings(vf); - set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); - clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); - + if (!i40e_alloc_vf_res(vf)) { + i40e_enable_vf_mappings(vf); + set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + } /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); i40e_flush(hw); @@ -940,6 +944,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) if (pci_num_vf(pf->pdev) != num_alloc_vfs) { ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); if (ret) { + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; pf->num_alloc_vfs = 0; goto err_iov; } @@ -964,8 +969,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) /* VF resources get allocated during reset */ i40e_reset_vf(&vfs[i], false); - /* enable VF vplan_qtable mappings */ - i40e_enable_vf_mappings(&vfs[i]); } pf->num_alloc_vfs = num_alloc_vfs; @@ -1103,6 +1106,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, } } else { vf->num_valid_msgs++; + /* reset the invalid counter, if a valid message is received. */ + vf->num_invalid_msgs = 0; } aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, @@ -1204,6 +1209,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) } else { vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; } + + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; @@ -1593,6 +1602,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } vsi = pf->vsi[vf->lan_vsi_idx]; + /* Lock once, because all function inside for loop accesses VSI's + * MAC filter list which needs to be protected using same lock. 
+ */ + spin_lock_bh(&vsi->mac_filter_list_lock); + /* add new addresses to the list */ for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; @@ -1611,9 +1625,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) dev_err(&pf->pdev->dev, "Unable to add VF MAC filter\n"); ret = I40E_ERR_PARAM; + spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; } } + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ if (i40e_sync_vsi_filters(vsi, false)) @@ -1661,10 +1677,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } vsi = pf->vsi[vf->lan_vsi_idx]; + spin_lock_bh(&vsi->mac_filter_list_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) i40e_del_filter(vsi, al->list[i].addr, I40E_VLAN_ANY, true, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); /* program the updated filter list */ if (i40e_sync_vsi_filters(vsi, false)) @@ -2061,6 +2079,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } + /* Lock once because below invoked function add/del_filter requires + * mac_filter_list_lock to be held + */ + spin_lock_bh(&vsi->mac_filter_list_lock); + /* delete the temporary mac address */ i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id ? vf->port_vlan_id : -1, @@ -2072,6 +2095,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) list_for_each_entry(f, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); + spin_unlock_bh(&vsi->mac_filter_list_lock); + dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter */ if (i40e_sync_vsi_filters(vsi, false)) { @@ -2104,6 +2129,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; + bool is_vsi_in_vlan = false; struct i40e_vsi *vsi; struct i40e_vf *vf; int ret = 0; @@ -2133,7 +2159,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, /* duplicate request, so just return success */ goto error_pvid; - if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) { + spin_lock_bh(&vsi->mac_filter_list_lock); + is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi); + spin_unlock_bh(&vsi->mac_filter_list_lock); + + if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) { dev_err(&pf->pdev->dev, "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", vf_id); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 736f6f08b4f2..da44995def42 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -29,8 +29,6 @@ #include "i40e.h" -#define I40E_MAX_MACVLAN_FILTERS 256 -#define I40E_MAX_VLAN_FILTERS 256 #define I40E_MAX_VLANID 4095 #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 @@ -98,7 +96,8 @@ struct i40e_vf { u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u64 num_mdd_events; /* num of mdd events detected */ - u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */ + /* num of continuous malformed or invalid msgs detected */ + u64 num_invalid_msgs; u64 num_valid_msgs; /* num of valid msgs detected */ unsigned long vf_caps; /* vf's adv. 
capabilities */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 3eba36913c1d..fd123ca60761 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -373,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -391,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -432,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -450,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index c8022092d369..fcb9ef34cc7a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1719,11 +1719,13 @@ struct i40e_aqc_get_link_status { u8 phy_type; /* i40e_aq_phy_type */ u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; -#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_UP 0x01 /* obsolete */ +#define I40E_AQ_LINK_UP_FUNCTION 0x01 #define I40E_AQ_LINK_FAULT 0x02 #define I40E_AQ_LINK_FAULT_TX 0x04 #define I40E_AQ_LINK_FAULT_RX 0x08 #define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 #define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index b98b642b897a..72b1942a94aa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -87,7 +87,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert **/ -char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) { switch (aq_err) { case I40E_AQ_RC_OK: @@ -147,7 +147,7 @@ char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ -char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err) +const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err) { switch (stat_err) { case 0: @@ -331,25 +331,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, - "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - i, buf[i], buf[i + 1], buf[i + 2], - buf[i + 3], buf[i + 4], buf[i + 5], - buf[i + 6], buf[i + 7], buf[i + 8], - buf[i + 9], buf[i + 10], buf[i + 11], - buf[i + 12], buf[i + 13], buf[i + 14], - buf[i + 15]); + i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); /* write whatever's left over without 
overrunning the buffer */ - if (i < len) { - char d_buf[80]; - int j = 0; - - memset(d_buf, 0, sizeof(d_buf)); - j += sprintf(d_buf, "\t0x%04X ", i); - while (i < len) - j += sprintf(&d_buf[j], " %02X", buf[i++]); - i40e_debug(hw, mask, "%s\n", d_buf); - } + if (i < len) + i40e_debug(hw, mask, "\t0x%04X %*ph\n", + i, len - i, buf + i); } } @@ -443,9 +429,6 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); - cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut)); - cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut)); - status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL); return status; @@ -520,8 +503,6 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); - cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key)); - cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key)); status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h new file mode 100644 index 000000000000..e6a39c9862e8 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -0,0 +1,55 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DEVIDS_H_ +#define _I40E_DEVIDS_H_ + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_A 0x157F +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_20G_KR2_A 0x1588 +#define I40E_DEV_ID_10G_BASE_T4 0x1589 +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_X722_VF 0x37CD +#define I40E_DEV_ID_X722_VF_HV 0x37D9 + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +#endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 55ae4b0f8192..cbd9a1b078ab 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -60,8 +60,8 @@ void i40e_idle_aq(struct i40e_hw *hw); void i40evf_resume_aq(struct i40e_hw *hw); bool i40evf_check_asq_alive(struct i40e_hw *hw); i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); -char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); +const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); @@ -101,4 +101,6 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details); +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 vsi_seid); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 0e71eb4633d5..47e9a90d6b10 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -318,6 +318,8 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector * i40e_set_new_dynamic_itr - Find new ITR level * @rc: structure containing ring performance data * + * Returns true if ITR changed, false if not + * * Stores a new ITR value based on packets and byte counts during * the last interrupt. The advantage of per interrupt computation * is faster updates and more accurate ITR for the current traffic @@ -326,21 +328,32 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector * testing data as well as attempting to minimize response time * while increasing bulk throughput. 
**/ -static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; + struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; + int usecs; if (rc->total_packets == 0 || !rc->itr) - return; + return false; /* simple throttlerate management - * 0-10MB/s lowest (100000 ints/s) + * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (8000 ints/s) + * 20-1249MB/s bulk (18000 ints/s) + * > 40000 Rx packets per second (8000 ints/s) + * + * The math works out because the divisor is in 10^(-6) which + * turns the bytes/us input value into MB/s values, but + * make sure to use usecs, as the register values written + * are in 2 usec increments in the ITR registers, and make sure + * to use the smoothed values that the countdown timer gives us. */ - bytes_per_int = rc->total_bytes / rc->itr; + usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) @@ -353,35 +366,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: - if (bytes_per_int <= 20) - new_latency_range = I40E_LOW_LATENCY; - break; + case I40E_ULTRA_LATENCY: default: if (bytes_per_int <= 20) new_latency_range = I40E_LOW_LATENCY; break; } + + /* this is to adjust RX more aggressively when streaming small + * packets. The value of 40000 was picked as it is just beyond + * what the hardware can receive per second if in low latency + * mode. + */ +#define RX_ULTRA_PACKET_RATE 40000 + + if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && + (&qv->rx == rc)) + new_latency_range = I40E_ULTRA_LATENCY; + rc->latency_range = new_latency_range; switch (new_latency_range) { case I40E_LOWEST_LATENCY: - new_itr = I40E_ITR_100K; + new_itr = I40E_ITR_50K; break; case I40E_LOW_LATENCY: new_itr = I40E_ITR_20K; break; case I40E_BULK_LATENCY: + new_itr = I40E_ITR_18K; + break; + case I40E_ULTRA_LATENCY: new_itr = I40E_ITR_8K; break; default: break; } - if (new_itr != rc->itr) - rc->itr = new_itr; - rc->total_bytes = 0; rc->total_packets = 0; + + if (new_itr != rc->itr) { + rc->itr = new_itr; + return true; + } + + return false; } /* @@ -742,16 +772,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { struct i40e_q_vector *q_vector = rx_ring->q_vector; - struct i40e_vsi *vsi = rx_ring->vsi; - u64 flags = vsi->back->flags; if (vlan_tag & VLAN_VID_MASK) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - if (flags & I40E_FLAG_IN_NETPOLL) - netif_rx(skb); - else - napi_gro_receive(&q_vector->napi, skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -1192,6 +1217,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) return total_rx_packets; } +static u32 i40e_buildreg_itr(const int type, const u16 itr) +{ + u32 val; + + val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | + (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | + (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); + + return val; +} + +/* a small macro to shorten up some long lines */ +#define INTREG I40E_VFINT_DYN_CTLN1 + /** * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt * @vsi: the VSI we care about @@ -1202,55 +1242,67 @@ static inline void 
i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { struct i40e_hw *hw = &vsi->back->hw; - u16 old_itr; + bool rx = false, tx = false; + u32 rxval, txval; int vector; - u32 val; vector = (q_vector->v_idx + vsi->base_vector); + + /* avoid dynamic calculation if in countdown mode OR if + * all dynamic is disabled + */ + rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + + if (q_vector->itr_countdown > 0 || + (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && + !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { + goto enable_int; + } + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { - old_itr = q_vector->rx.itr; - i40e_set_new_dynamic_itr(&q_vector->rx); - if (old_itr != q_vector->rx.itr) { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_RX_ITR << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | - (q_vector->rx.itr << - I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); - } else { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val); - } else { - i40evf_irq_enable_queues(vsi->back, 1 - << q_vector->v_idx); + rx = i40e_set_new_dynamic_itr(&q_vector->rx); + rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { - old_itr = q_vector->tx.itr; - i40e_set_new_dynamic_itr(&q_vector->tx); - if (old_itr != q_vector->tx.itr) { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_TX_ITR << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | - (q_vector->tx.itr << - I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); + tx = i40e_set_new_dynamic_itr(&q_vector->tx); + txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); + } + if (rx || tx) { + /* get the higher of the two ITR adjustments and + * use the same value for both ITR registers + * when in adaptive mode (Rx and/or Tx) + */ + u16 itr = max(q_vector->tx.itr, q_vector->rx.itr); - } else { - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - (I40E_ITR_NONE << - I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT); - } - if (!test_bit(__I40E_DOWN, &vsi->state)) - wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val); - } else { - i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx)); + q_vector->tx.itr = q_vector->rx.itr = itr; + txval = i40e_buildreg_itr(I40E_TX_ITR, itr); + tx = true; + rxval = i40e_buildreg_itr(I40E_RX_ITR, itr); + rx = true; + } + + /* only need to enable the interrupt once, but need + * to possibly update both ITR values + */ + if (rx) { + /* set the INTENA_MSK_MASK so that this first write + * won't actually enable the interrupt, instead just + * updating the ITR (it's bit 31 PF and VF) + */ + rxval |= BIT(31); + /* don't check _DOWN because interrupt isn't being enabled */ + wr32(hw, INTREG(vector - 1), rxval); } + +enable_int: + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, INTREG(vector - 1), txval); + + if (q_vector->itr_countdown) + q_vector->itr_countdown--; + else + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } /** @@ -1271,7 +1323,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; - int cleaned; + int work_done = 0; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1287,22 +1339,31 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) ring->arm_wb = false; } + /* Handle case where we are called by netpoll with a budget 
of 0 */ + if (budget <= 0) + goto tx_only; + /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { + int cleaned; + if (ring_is_ps_enabled(ring)) cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); else cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + + work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ clean_complete &= (budget_per_ring != cleaned); } /* If work not completed, return budget and polling will return */ if (!clean_complete) { +tx_only: if (arm_wb) i40evf_force_wb(vsi, q_vector); return budget; @@ -1312,7 +1373,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) q_vector->arm_wb_state = false; /* Work is done so exit the polling mode and re-enable the interrupt */ - napi_complete(napi); + napi_complete_done(napi, work_done); i40e_update_enable_itr(vsi, q_vector); return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 0c13ece00366..ebc1bf77f036 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -32,11 +32,14 @@ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ #define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 +#define I40E_ITR_50K 0x000A #define I40E_ITR_20K 0x0019 +#define I40E_ITR_18K 0x001B #define I40E_ITR_8K 0x003E #define I40E_ITR_4K 0x007A -#define I40E_ITR_RX_DEF I40E_ITR_8K -#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ +#define I40E_ITR_RX_DEF I40E_ITR_20K +#define I40E_ITR_TX_DEF I40E_ITR_20K #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ @@ -44,6 +47,15 @@ #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) +/* 0x40 is the enable bit for interrupt rate limiting, and must be set if + * the value of the rate limit is non-zero + */ +#define INTRL_ENA BIT(6) +#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) +#define INTRL_USEC_TO_REG(set) ((set) ? 
((set) >> 2) | INTRL_ENA : 0) +#define I40E_INTRL_8K 125 /* 8000 ints/sec */ +#define I40E_INTRL_62K 16 /* 62500 ints/sec */ +#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define I40E_QUEUE_END_OF_LIST 0x7FF @@ -79,16 +91,16 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) #define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ - BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ - BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ - BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ - BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) #define i40e_pf_get_default_rss_hena(pf) \ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ - I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ @@ -281,6 +293,7 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, + I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index a59b60ffd0ce..301fe2b6dd03 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -33,31 +33,7 @@ #include "i40e_adminq.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" - -/* Device IDs */ -#define I40E_DEV_ID_SFP_XL710 0x1572 -#define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_A 0x157F -#define I40E_DEV_ID_KX_B 0x1580 -#define I40E_DEV_ID_KX_C 0x1581 -#define I40E_DEV_ID_QSFP_A 0x1583 -#define I40E_DEV_ID_QSFP_B 0x1584 -#define I40E_DEV_ID_QSFP_C 0x1585 -#define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_20G_KR2 0x1587 -#define I40E_DEV_ID_20G_KR2_A 0x1588 -#define I40E_DEV_ID_10G_BASE_T4 0x1589 -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 -#define I40E_DEV_ID_SFP_X722 0x37D0 -#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 -#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 - -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) +#include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ #define I40E_MASK(mask, shift) (mask << shift) @@ -191,16 +167,65 @@ struct i40e_link_status { bool crc_enable; u8 pacing; u8 requested_speeds; + u8 module_type[3]; + /* 1st byte: module identifier */ +#define I40E_MODULE_TYPE_SFP 0x03 +#define I40E_MODULE_TYPE_QSFP 0x0D + /* 2nd byte: ethernet compliance codes for 10/40G */ +#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 +#define I40E_MODULE_TYPE_40G_LR4 0x02 +#define I40E_MODULE_TYPE_40G_SR4 0x04 +#define I40E_MODULE_TYPE_40G_CR4 0x08 +#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 +#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 +#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 +#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 + /* 3rd byte: ethernet compliance codes for 1G */ +#define I40E_MODULE_TYPE_1000BASE_SX 0x01 +#define I40E_MODULE_TYPE_1000BASE_LX 
0x02 +#define I40E_MODULE_TYPE_1000BASE_CX 0x04 +#define I40E_MODULE_TYPE_1000BASE_T 0x08 +}; + +enum i40e_aq_capabilities_phy_type { + I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII), + I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX), + I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4), + I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR), + I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4), + I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI), + I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI), + I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI), + I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI), + I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI), + I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU), + I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC), + I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC), + I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX), + I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T), + I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T), + I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR), + I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR), + I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1), + I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4), + I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4), + I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4), + I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX), + I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX), + I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = + BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL), + I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2) }; struct i40e_phy_info { struct i40e_link_status link_info; struct i40e_link_status link_info_old; - u32 autoneg_advertised; - u32 phy_id; - u32 module_type; bool get_link_info; enum i40e_media_type media_type; + /* all the phy types the NVM is capable of */ + enum i40e_aq_capabilities_phy_type phy_types; }; #define I40E_HW_CAP_MAX_GPIO 30 @@ -288,6 +313,7 @@ struct i40e_nvm_info { bool blank_nvm_mode; /* is NVM empty (no FW present)*/ u16 version; /* NVM package version */ u32 eetrack; /* NVM data version */ + u32 oem_ver; /* OEM version info */ }; /* definitions used in NVM update support */ @@ -1029,8 +1055,8 @@ enum i40e_filter_program_desc_fd_status { }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ - BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ @@ -1173,6 +1199,7 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index cadda642c98c..9f7b279b9d9c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -150,6 +150,7 @@ struct i40e_virtchnl_vsi_resource { 
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index e7a223ea6c25..22fc3d49c4b9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -87,7 +87,7 @@ struct i40e_vsi { #define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ #define I40EVF_MAX_AQ_BUF_SIZE 4096 #define I40EVF_AQ_LEN 32 -#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */ +#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -112,6 +112,8 @@ struct i40e_q_vector { struct i40e_ring_container tx; u32 ring_mask; u8 num_ringpairs; /* total number of ring pairs in vector */ +#define ITR_COUNTDOWN_START 100 + u8 itr_countdown; /* when 0 or 1 update ITR */ int v_idx; /* vector index in list */ char name[IFNAMSIZ + 9]; bool arm_wb_state; @@ -211,7 +213,6 @@ struct i40evf_adapter { #define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1) #define I40EVF_FLAG_RX_PS_CAPABLE BIT(2) #define I40EVF_FLAG_RX_PS_ENABLED BIT(3) -#define I40EVF_FLAG_IN_NETPOLL BIT(4) #define I40EVF_FLAG_IMIR_ENABLED BIT(5) #define I40EVF_FLAG_MQ_CAPABLE BIT(6) #define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) @@ -220,10 +221,10 @@ struct i40evf_adapter { #define I40EVF_FLAG_RESET_NEEDED BIT(10) #define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11) #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) +#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) /* duplicates for common code */ #define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 -#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED #define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index c00e4959f026..d962164dfb0f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.3.13" +#define DRV_VERSION "1.3.33" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2015 Intel Corporation."; @@ -282,6 +282,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) /** * i40evf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure + * @flush: boolean value whether to run rd32() **/ void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush) { @@ -305,15 +306,14 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; u32 val; - u32 ena_mask; /* handle non-queue interrupts */ - val = rd32(hw, I40E_VFINT_ICR01); - ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1); + rd32(hw, I40E_VFINT_ICR01); + rd32(hw, I40E_VFINT_ICR0_ENA1); - val = rd32(hw, I40E_VFINT_DYN_CTL01); - val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; + 
val = rd32(hw, I40E_VFINT_DYN_CTL01) | + I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, val); /* schedule work on the private workqueue */ @@ -334,7 +334,7 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; - napi_schedule(&q_vector->napi); + napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } @@ -357,6 +357,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) q_vector->rx.ring = rx_ring; q_vector->rx.count++; q_vector->rx.latency_range = I40E_LOW_LATENCY; + q_vector->itr_countdown = ITR_COUNTDOWN_START; } /** @@ -377,6 +378,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) q_vector->tx.ring = tx_ring; q_vector->tx.count++; q_vector->tx.latency_range = I40E_LOW_LATENCY; + q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->num_ringpairs++; q_vector->ring_mask |= BIT(t_idx); } @@ -444,6 +446,29 @@ out: return err; } +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * i40evf_netpoll - A Polling 'interrupt' handler + * @netdev: network interface device structure + * + * This is used by netconsole to send skbs without having to re-enable + * interrupts. It's not called while the normal interrupt routine is executing. + **/ +static void i40evf_netpoll(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int q_vectors = adapter->num_msix_vectors - NONQ_VECS; + int i; + + /* if interface is down do nothing */ + if (test_bit(__I40E_DOWN, &adapter->vsi.state)) + return; + + for (i = 0; i < q_vectors; i++) + i40evf_msix_clean_rings(0, adapter->q_vector[i]); +} + +#endif /** * i40evf_request_traffic_irqs - Initialize MSI-X interrupts * @adapter: board private structure @@ -841,6 +866,15 @@ static int i40evf_set_mac(struct net_device *netdev, void *p) if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) return 0; + if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF) + return -EPERM; + + f = i40evf_find_filter(adapter, hw->mac.addr); + if (f) { + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + } + f = i40evf_add_filter(adapter, addr->sa_data); if (f) { ether_addr_copy(hw->mac.addr, addr->sa_data); @@ -1114,6 +1148,8 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->netdev = adapter->netdev; tx_ring->dev = &adapter->pdev->dev; tx_ring->count = adapter->tx_desc_count; + if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; adapter->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; @@ -1573,6 +1609,7 @@ static void i40evf_reset_task(struct work_struct *work) reset_task); struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; + struct i40evf_vlan_filter *vlf; struct i40evf_mac_filter *f; u32 reg_val; int i = 0, err; @@ -1617,7 +1654,7 @@ static void i40evf_reset_task(struct work_struct *work) /* extra wait to make sure minimum wait is met */ msleep(I40EVF_RESET_WAIT_MS); if (i == I40EVF_RESET_WAIT_COUNT) { - struct i40evf_mac_filter *f, *ftmp; + struct i40evf_mac_filter *ftmp; struct i40evf_vlan_filter *fv, *fvtmp; /* reset never finished */ @@ -1696,8 +1733,8 @@ continue_reset: f->add = true; } /* re-add all VLAN filters */ - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - f->add = true; + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { + vlf->add = true; } adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= 
I40EVF_FLAG_AQ_ADD_VLAN_FILTER; @@ -2038,6 +2075,9 @@ static const struct net_device_ops i40evf_netdev_ops = { .ndo_tx_timeout = i40evf_tx_timeout, .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = i40evf_netpoll, +#endif }; /** @@ -2086,7 +2126,10 @@ int i40evf_process_config(struct i40evf_adapter *adapter) if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { - netdev->vlan_features = netdev->features; + netdev->vlan_features = netdev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; @@ -2244,10 +2287,13 @@ static void i40evf_init_task(struct work_struct *work) if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", adapter->hw.mac.addr); - random_ether_addr(adapter->hw.mac.addr); + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF; + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = &i40evf_watchdog_timer; @@ -2263,6 +2309,9 @@ static void i40evf_init_task(struct work_struct *work) if (err) goto err_sw_init; i40evf_map_rings_to_vectors(adapter); + if (adapter->vf_res->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; if (!RSS_AQ(adapter)) i40evf_configure_rss(adapter); err = i40evf_request_misc_irq(adapter); @@ -2311,11 +2360,14 @@ err_alloc: err: /* Things went into the weeds, so try again later */ if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { - dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n"); + dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n"); adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - return; /* do not reschedule */ + i40evf_shutdown_adminq(hw); + adapter->state = __I40EVF_STARTUP; + schedule_delayed_work(&adapter->init_task, HZ * 5); + return; } - schedule_delayed_work(&adapter->init_task, HZ * 3); + schedule_delayed_work(&adapter->init_task, HZ); } /** diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 4f056efecba4..32e620e1eb5c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -156,7 +156,8 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | - I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + I40E_VIRTCHNL_VF_OFFLOAD_VLAN | + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 74262768b09b..2529bc625de4 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -842,10 +842,6 @@ static void igb_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); 
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = IGB_STATS_LEN; - drvinfo->testinfo_len = IGB_TEST_LEN; - drvinfo->regdump_len = igb_get_regs_len(netdev); - drvinfo->eedump_len = igb_get_eeprom_len(netdev); } static void igb_get_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7e6267503790..ea7b09887245 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -151,7 +151,7 @@ static void igb_setup_dca(struct igb_adapter *); #endif /* CONFIG_IGB_DCA */ static int igb_poll(struct napi_struct *, int); static bool igb_clean_tx_irq(struct igb_q_vector *); -static bool igb_clean_rx_irq(struct igb_q_vector *, int); +static int igb_clean_rx_irq(struct igb_q_vector *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); @@ -6364,6 +6364,7 @@ static int igb_poll(struct napi_struct *napi, int budget) struct igb_q_vector, napi); bool clean_complete = true; + int work_done = 0; #ifdef CONFIG_IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) @@ -6372,15 +6373,19 @@ static int igb_poll(struct napi_struct *napi, int budget) if (q_vector->tx.ring) clean_complete = igb_clean_tx_irq(q_vector); - if (q_vector->rx.ring) - clean_complete &= igb_clean_rx_irq(q_vector, budget); + if (q_vector->rx.ring) { + int cleaned = igb_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + clean_complete &= (cleaned < budget); + } /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; /* If not enough Rx work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); igb_ring_irq_enable(q_vector); return 0; @@ -6904,7 +6909,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring, skb->protocol = eth_type_trans(skb, rx_ring->netdev); } -static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) +static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) { struct igb_ring *rx_ring = q_vector->rx.ring; struct sk_buff *skb = rx_ring->skb; @@ -6978,7 +6983,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) if (cleaned_count) igb_alloc_rx_buffers(rx_ring, cleaned_count); - return total_packets < budget; + return total_packets; } static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index c6996feb1cb4..b74ce53d7b52 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -196,8 +196,6 @@ static void igbvf_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = igbvf_get_regs_len(netdev); - drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); } static void igbvf_get_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index e86d41ed9260..297af801f051 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1211,7 +1211,7 @@ static int igbvf_poll(struct napi_struct *napi, int budget) /* If not enough Rx work done, exit the polling mode */ if (work_done < 
budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->requested_itr & 3) igbvf_set_itr(adapter); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index b311e9e710d2..d2b29b490ae0 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -479,9 +479,6 @@ ixgb_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = IXGB_STATS_LEN; - drvinfo->regdump_len = ixgb_get_regs_len(netdev); - drvinfo->eedump_len = ixgb_get_eeprom_len(netdev); } static void diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index dda0f678339a..1d2174526a4c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -152,9 +152,17 @@ struct vf_data_storage { u16 vlan_count; u8 spoofchk_enabled; bool rss_query_enabled; + u8 trusted; + int xcast_mode; unsigned int vf_api; }; +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + struct vf_macvlans { struct list_head l; int vf; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 94c4912b2330..d681273bd39d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -943,9 +943,6 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = IXGBE_STATS_LEN; - drvinfo->testinfo_len = IXGBE_TEST_LEN; - drvinfo->regdump_len = ixgbe_get_regs_len(netdev); } static void ixgbe_get_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 191003901adb..47395ff5d908 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2775,7 +2775,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *ring; - int per_ring_budget; + int per_ring_budget, work_done = 0; bool clean_complete = true; #ifdef CONFIG_IXGBE_DCA @@ -2796,9 +2796,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - ixgbe_for_each_ring(ring, q_vector->rx) - clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring, - per_ring_budget) < per_ring_budget); + ixgbe_for_each_ring(ring, q_vector->rx) { + int cleaned = ixgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + + work_done += cleaned; + clean_complete &= (cleaned < per_ring_budget); + } ixgbe_qv_unlock_napi(q_vector); /* If all work not completed, return budget and keep polling */ @@ -2806,7 +2810,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) return budget; /* all work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->rx_itr_setting & 1) ixgbe_set_itr(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -3723,14 +3727,20 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), adapter->num_vfs); - /* Ensure LLDP is set for Ethertype Antispoofing if we will be + /* Ensure LLDP and 
FC is set for Ethertype Antispoofing if we will be * calling set_ethertype_anti_spoofing for each VF in loop below */ - if (hw->mac.ops.set_ethertype_anti_spoofing) + if (hw->mac.ops.set_ethertype_anti_spoofing) { IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), - (IXGBE_ETQF_FILTER_EN | /* enable filter */ - IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */ - IXGBE_ETH_P_LLDP)); /* LLDP eth type */ + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + IXGBE_ETH_P_LLDP)); + + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + ETH_P_PAUSE)); + } /* For VFs that have spoof checking turned off */ for (i = 0; i < adapter->num_vfs; i++) { @@ -5301,7 +5311,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); adapter->ring_feature[RING_F_RSS].limit = rss; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; adapter->max_q_vectors = MAX_Q_VECTORS_82599; adapter->atr_sample_rate = 20; fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); @@ -5327,7 +5336,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) switch (hw->mac.type) { case ixgbe_mac_82598EB: adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; @@ -8399,6 +8407,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, + .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, #ifdef CONFIG_IXGBE_DCB diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index b1e4703ff2a5..8daa95f74548 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -102,6 +102,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ #define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 1d17b5872dd1..fcd8b27a0ccb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -116,6 +116,12 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) * we want to disable the querying by default. 
*/ adapter->vfinfo[i].rss_query_enabled = 0; + + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; } return 0; @@ -1001,6 +1007,59 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return 0; } +static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_12: + break; + default: + return -EOPNOTSUPP; + } + + if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) { + xcast_mode = IXGBEVF_XCAST_MODE_MULTI; + } + + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case IXGBEVF_XCAST_MODE_NONE: + disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + enable = 0; + break; + case IXGBEVF_XCAST_MODE_MULTI: + disable = IXGBE_VMOLR_MPE; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; + break; + case IXGBEVF_XCAST_MODE_ALLMULTI: + disable = 0; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + vmolr &= ~disable; + vmolr |= enable; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; @@ -1063,6 +1122,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_GET_RSS_KEY: retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); break; + case IXGBE_VF_UPDATE_XCAST_MODE: + retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; @@ -1124,6 +1186,17 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); } +static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ping; + + ping = IXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= IXGBE_VT_MSGTYPE_CTS; + ixgbe_write_mbx(hw, &ping, 1, vf); +} + void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1416,6 +1489,28 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, return 0; } +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + ixgbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? 
"" : "not "); + + return 0; +} + int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { @@ -1430,5 +1525,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, ivi->qos = adapter->vfinfo[vf].pf_qos; ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; + ivi->trusted = adapter->vfinfo[vf].trusted; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 2c197e6d1fe7..dad925706f4c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -49,6 +49,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting); +int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 939c90c4ff39..995f03107eac 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1752,6 +1752,9 @@ enum { * FCoE (0x8906): Filter 2 * 1588 (0x88f7): Filter 3 * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 */ #define IXGBE_ETQF_FILTER_EAPOL 0 #define IXGBE_ETQF_FILTER_FCOE 2 @@ -1759,6 +1762,7 @@ enum { #define IXGBE_ETQF_FILTER_FIP 4 #define IXGBE_ETQF_FILTER_LLDP 5 #define IXGBE_ETQF_FILTER_LACP 6 +#define IXGBE_ETQF_FILTER_FC 7 /* VLAN Control Bit Masks */ #define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index ed7b2899affe..ebe0ac950b14 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -198,6 +198,7 @@ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) * ixgbe_reset_cs4227 - Reset CS4227 using port expander * @hw: pointer to hardware structure * + * This function assumes that the caller has acquired the proper semaphore. * Returns error code */ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) @@ -296,6 +297,14 @@ static void ixgbe_check_cs4227(struct ixgbe_hw *hw) hw->mac.ops.release_swfw_sync(hw, swfw_mask); msleep(IXGBE_CS4227_CHECK_DELAY); } + /* If still pending, assume other instance failed. */ + if (retry == IXGBE_CS4227_RETRIES) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_err(hw, "semaphore failed with %d\n", status); + return; + } + } /* Reset the CS4227. 
*/ status = ixgbe_reset_cs4227(hw); @@ -1608,7 +1617,7 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) if (status) return status; - if (lsc) + if (lsc && phy->ops.setup_internal_link) return phy->ops.setup_internal_link(hw); return 0; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 04c7ec8446e0..ec3147279621 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -471,6 +471,12 @@ enum ixgbevf_boards { board_X550EM_x_vf, }; +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + extern const struct ixgbevf_info ixgbevf_82599_vf_info; extern const struct ixgbevf_info ixgbevf_X540_vf_info; extern const struct ixgbevf_info ixgbevf_X550_vf_info; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 35da2d74e73e..592ff237d692 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1008,7 +1008,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) container_of(napi, struct ixgbevf_q_vector, napi); struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_ring *ring; - int per_ring_budget; + int per_ring_budget, work_done = 0; bool clean_complete = true; ixgbevf_for_each_ring(ring, q_vector->tx) @@ -1027,10 +1027,12 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - ixgbevf_for_each_ring(ring, q_vector->rx) - clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, - per_ring_budget) - < per_ring_budget); + ixgbevf_for_each_ring(ring, q_vector->rx) { + int cleaned = ixgbevf_clean_rx_irq(q_vector, ring, + per_ring_budget); + work_done += cleaned; + clean_complete &= (cleaned < per_ring_budget); + } #ifdef CONFIG_NET_RX_BUSY_POLL ixgbevf_qv_unlock_napi(q_vector); @@ -1040,7 +1042,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; /* all work done, exit the polling mode */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->rx_itr_setting & 1) ixgbevf_set_itr(q_vector); if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && @@ -1892,9 +1894,17 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + unsigned int flags = netdev->flags; + int xcast_mode; + + xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI : + (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? 
+ IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; spin_lock_bh(&adapter->mbx_lock); + hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode); + /* reprogram multicast list */ hw->mac.ops.update_mc_addr_list(hw, netdev); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 82f44e06e5fc..340cdd469455 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -112,6 +112,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ #define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index d1339b050627..427f3605cbfc 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -469,6 +469,46 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, } /** + * ixgbevf_update_xcast_mode - Update Multicast mode + * @hw: pointer to the HW structure + * @netdev: pointer to net device structure + * @xcast_mode: new multicast mode + * + * Updates the Multicast Mode of VF. + **/ +static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, + struct net_device *netdev, int xcast_mode) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + switch (hw->api_version) { + case ixgbe_mbox_api_12: + break; + default: + return -EOPNOTSUPP; + } + + msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; + msgbuf[1] = xcast_mode; + + err = mbx->ops.write_posted(hw, msgbuf, 2); + if (err) + return err; + + err = mbx->ops.read_posted(hw, msgbuf, 2); + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + return 0; +} + +/** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID @@ -727,6 +767,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .check_link = ixgbevf_check_mac_link_vf, .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, + .update_xcast_mode = ixgbevf_update_xcast_mode, .set_uc_addr = ixgbevf_set_uc_addr_vf, .set_vfta = ixgbevf_set_vfta_vf, }; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index d40f036b6df0..ef9f7736b4dc 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -63,6 +63,7 @@ struct ixgbe_mac_operations { s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index c78ae1868097..6bf725921e79 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -759,11 +759,23 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, desc->l4i_chk = 0; desc->byte_cnt = length; - desc->buf_ptr = dma_map_single(dev->dev.parent, data, - length, DMA_TO_DEVICE); - if 
(unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) { - WARN(1, "dma_map_single failed!\n"); - return -ENOMEM; + + if (length <= 8 && (uintptr_t)data & 0x7) { + /* Copy unaligned small data fragment to TSO header data area */ + memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE, + data, length); + desc->buf_ptr = txq->tso_hdrs_dma + + txq->tx_curr_desc * TSO_HEADER_SIZE; + } else { + /* Alignment is okay, map buffer and hand off to hardware */ + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; + desc->buf_ptr = dma_map_single(dev->dev.parent, data, + length, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, + desc->buf_ptr))) { + WARN(1, "dma_map_single failed!\n"); + return -ENOMEM; + } } cmd_sts = BUFFER_OWNED_BY_DMA; @@ -779,7 +791,8 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, } static inline void -txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length) +txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length, + u32 *first_cmd_sts, bool first_desc) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); @@ -788,6 +801,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length) int ret; u32 cmd_csum = 0; u16 l4i_chk = 0; + u32 cmd_sts; tx_index = txq->tx_curr_desc; desc = &txq->tx_desc_area[tx_index]; @@ -803,9 +817,17 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length) desc->byte_cnt = hdr_len; desc->buf_ptr = txq->tso_hdrs_dma + txq->tx_curr_desc * TSO_HEADER_SIZE; - desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | + cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | GEN_CRC; + /* Defer updating the first command descriptor until all + * following descriptors have been written. 
+ */ + if (first_desc) + *first_cmd_sts = cmd_sts; + else + desc->cmd_sts = cmd_sts; + txq->tx_curr_desc++; if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; @@ -819,6 +841,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, int desc_count = 0; struct tso_t tso; int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + struct tx_desc *first_tx_desc; + u32 first_cmd_sts = 0; /* Count needed descriptors */ if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) { @@ -826,11 +850,14 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, return -EBUSY; } + first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; + /* Initialize the TSO handler, and prepare the first payload */ tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { + bool first_desc = (desc_count == 0); char *hdr; data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); @@ -840,7 +867,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, /* prepare packet headers: MAC + IP + TCP */ hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE; tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); - txq_put_hdr_tso(skb, txq, data_left); + txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts, + first_desc); while (data_left > 0) { int size; @@ -860,6 +888,10 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, __skb_queue_tail(&txq->tx_skb, skb); skb_tx_timestamp(skb); + /* ensure all other descriptors are written before first cmd_sts */ + wmb(); + first_tx_desc->cmd_sts = first_cmd_sts; + /* clear TX_END status */ mp->work_tx_end &= ~(1 << txq->index); @@ -1586,7 +1618,6 @@ static void mv643xx_eth_get_drvinfo(struct net_device *dev, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); - drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); } static int mv643xx_eth_nway_reset(struct net_device *dev) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index dd6fe942acf9..a47496a020d9 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -101,6 +101,8 @@ #define MVNETA_TXQ_CMD 0x2448 #define MVNETA_TXQ_DISABLE_SHIFT 8 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff +#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484 +#define MVNETA_OVERRUN_FRAME_COUNT 0x2488 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) #define MVNETA_ACC_MODE 0x2500 @@ -192,7 +194,7 @@ #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) -#define MVNETA_MIB_COUNTERS_BASE 0x3080 +#define MVNETA_MIB_COUNTERS_BASE 0x3000 #define MVNETA_MIB_LATE_COLLISION 0x7c #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 #define MVNETA_DA_FILT_OTH_MCAST 0x3500 @@ -278,6 +280,50 @@ #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) +struct mvneta_statistic { + unsigned short offset; + unsigned short type; + const char name[ETH_GSTRING_LEN]; +}; + +#define T_REG_32 32 +#define T_REG_64 64 + +static const struct mvneta_statistic mvneta_statistics[] = { + { 0x3000, T_REG_64, "good_octets_received", }, + { 0x3010, T_REG_32, "good_frames_received", }, + { 0x3008, T_REG_32, "bad_octets_received", }, + { 0x3014, T_REG_32, "bad_frames_received", }, + { 0x3018, T_REG_32, "broadcast_frames_received", }, + { 0x301c, T_REG_32, "multicast_frames_received", }, + { 
0x3050, T_REG_32, "unrec_mac_control_received", }, + { 0x3058, T_REG_32, "good_fc_received", }, + { 0x305c, T_REG_32, "bad_fc_received", }, + { 0x3060, T_REG_32, "undersize_received", }, + { 0x3064, T_REG_32, "fragments_received", }, + { 0x3068, T_REG_32, "oversize_received", }, + { 0x306c, T_REG_32, "jabber_received", }, + { 0x3070, T_REG_32, "mac_receive_error", }, + { 0x3074, T_REG_32, "bad_crc_event", }, + { 0x3078, T_REG_32, "collision", }, + { 0x307c, T_REG_32, "late_collision", }, + { 0x2484, T_REG_32, "rx_discard", }, + { 0x2488, T_REG_32, "rx_overrun", }, + { 0x3020, T_REG_32, "frames_64_octets", }, + { 0x3024, T_REG_32, "frames_65_to_127_octets", }, + { 0x3028, T_REG_32, "frames_128_to_255_octets", }, + { 0x302c, T_REG_32, "frames_256_to_511_octets", }, + { 0x3030, T_REG_32, "frames_512_to_1023_octets", }, + { 0x3034, T_REG_32, "frames_1024_to_max_octets", }, + { 0x3038, T_REG_64, "good_octets_sent", }, + { 0x3040, T_REG_32, "good_frames_sent", }, + { 0x3044, T_REG_32, "excessive_collision", }, + { 0x3048, T_REG_32, "multicast_frames_sent", }, + { 0x304c, T_REG_32, "broadcast_frames_sent", }, + { 0x3054, T_REG_32, "fc_sent", }, + { 0x300c, T_REG_32, "internal_mac_transmit_err", }, +}; + struct mvneta_pcpu_stats { struct u64_stats_sync syncp; u64 rx_packets; @@ -324,6 +370,8 @@ struct mvneta_port { unsigned int speed; unsigned int tx_csum_limit; int use_inband_status:1; + + u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; }; /* The mvneta_tx_desc and mvneta_rx_desc structures describe the @@ -530,6 +578,8 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp) /* Perform dummy reads from MIB counters */ for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4) dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); + dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); + dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); } /* Get System Network Statistics */ @@ -758,7 +808,6 @@ static void mvneta_port_up(struct mvneta_port *pp) u32 q_map; /* Enable all initialized TXs. 
*/ - mvneta_mib_counters_clear(pp); q_map = 0; for (queue = 0; queue < txq_number; queue++) { struct mvneta_tx_queue *txq = &pp->txqs[queue]; @@ -1035,6 +1084,8 @@ static void mvneta_defaults_set(struct mvneta_port *pp) mvreg_write(pp, MVNETA_INTR_ENABLE, (MVNETA_RXQ_INTR_ENABLE_ALL_MASK | MVNETA_TXQ_INTR_ENABLE_ALL_MASK)); + + mvneta_mib_counters_clear(pp); } /* Set max sizes for tx queues */ @@ -2982,6 +3033,65 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev, return 0; } +static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, + u8 *data) +{ + if (sset == ETH_SS_STATS) { + int i; + + for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) + memcpy(data + i * ETH_GSTRING_LEN, + mvneta_statistics[i].name, ETH_GSTRING_LEN); + } +} + +static void mvneta_ethtool_update_stats(struct mvneta_port *pp) +{ + const struct mvneta_statistic *s; + void __iomem *base = pp->base; + u32 high, low, val; + int i; + + for (i = 0, s = mvneta_statistics; + s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); + s++, i++) { + val = 0; + + switch (s->type) { + case T_REG_32: + val = readl_relaxed(base + s->offset); + break; + case T_REG_64: + /* Docs say to read low 32-bit then high */ + low = readl_relaxed(base + s->offset); + high = readl_relaxed(base + s->offset + 4); + val = (u64)high << 32 | low; + break; + } + + pp->ethtool_stats[i] += val; + } +} + +static void mvneta_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mvneta_port *pp = netdev_priv(dev); + int i; + + mvneta_ethtool_update_stats(pp); + + for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) + *data++ = pp->ethtool_stats[i]; +} + +static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(mvneta_statistics); + return -EOPNOTSUPP; +} + static const struct net_device_ops mvneta_netdev_ops = { .ndo_open = mvneta_open, .ndo_stop = mvneta_stop, @@ -3003,6 +3113,9 @@ const struct ethtool_ops mvneta_eth_tool_ops = { .get_drvinfo = mvneta_ethtool_get_drvinfo, .get_ringparam = mvneta_ethtool_get_ringparam, .set_ringparam = mvneta_ethtool_set_ringparam, + .get_strings = mvneta_ethtool_get_strings, + .get_ethtool_stats = mvneta_ethtool_get_stats, + .get_sset_count = mvneta_ethtool_get_sset_count, }; /* Initialize hw */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index f79d8124321e..ddb5541882f5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -95,9 +95,6 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) (u16) (mdev->dev->caps.fw_ver & 0xffff)); strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = 0; - drvinfo->regdump_len = 0; - drvinfo->eedump_len = 0; } static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 597d8923c8e1..886e1bc86374 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2816,7 +2816,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_priv *priv; int i; int err; - u64 mac_u64; dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), MAX_TX_RINGS, MAX_RX_RINGS); @@ -2908,17 +2907,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->addr_len = 
ETH_ALEN; mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); if (!is_valid_ether_addr(dev->dev_addr)) { - if (mlx4_is_slave(priv->mdev->dev)) { - eth_hw_addr_random(dev); - en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); - mac_u64 = mlx4_mac_to_u64(dev->dev_addr); - mdev->dev->caps.def_mac[priv->port] = mac_u64; - } else { - en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", - priv->port, dev->dev_addr); - err = -EINVAL; - goto out; - } + en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", + priv->port, dev->dev_addr); + err = -EINVAL; + goto out; + } else if (mlx4_is_slave(priv->mdev->dev) && + (priv->mdev->dev->port_random_macs & 1 << priv->port)) { + /* Random MAC was assigned in mlx4_slave_cap + * in mlx4_core module + */ + dev->addr_assign_type |= NET_ADDR_RANDOM; + en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); } memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 8e81e53c370e..c34488479365 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -1364,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) * and performing a NOP command */ for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { + /* Make sure request_irq was called */ + if (!priv->eq_table.eq[i].have_irq) + continue; + /* Temporary use polling for command completions */ mlx4_cmd_use_polling(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index e8ec1dec5789..f13a4d7bbf95 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -2840,3 +2840,19 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val) return -EOPNOTSUPP; } EXPORT_SYMBOL(set_phv_bit); + +void mlx4_replace_zero_macs(struct mlx4_dev *dev) +{ + int i; + u8 mac_addr[ETH_ALEN]; + + dev->port_random_macs = 0; + for (i = 1; i <= dev->caps.num_ports; ++i) + if (!dev->caps.def_mac[i] && + dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) { + eth_random_addr(mac_addr); + dev->port_random_macs |= 1 << i; + dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr); + } +} +EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 006757f80988..85f1b1e7e505 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -863,6 +863,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) return -ENODEV; } + mlx4_replace_zero_macs(dev); + dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); @@ -2669,14 +2671,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) if (msi_x) { int nreq = dev->caps.num_ports * num_online_cpus() + 1; - bool shared_ports = false; nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); - if (nreq > MAX_MSIX) { + if (nreq > MAX_MSIX) nreq = MAX_MSIX; - shared_ports = true; - } entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); if (!entries) @@ -2699,9 +2698,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, dev->caps.num_ports); - if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) - shared_ports = true; - for (i = 0; i < 
dev->caps.num_comp_vectors + 1; i++) { if (i == MLX4_EQ_ASYNC) continue; @@ -2709,7 +2705,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) priv->eq_table.eq[i].irq = entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; - if (shared_ports) { + if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, dev->caps.num_ports); /* We don't set affinity hint when there diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 232b2b55f23b..e1cf9036af22 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1378,6 +1378,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); void mlx4_init_quotas(struct mlx4_dev *dev); +/* for VFs, replace zero MACs with randomly-generated MACs at driver start */ +void mlx4_replace_zero_macs(struct mlx4_dev *dev); int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); /* Returns the VF index of slave */ int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 78f51e103880..93195191f45b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -318,7 +318,7 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, key, NULL); } else { mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR_OR_NULL(mailbox)) + if (IS_ERR(mailbox)) return PTR_ERR(mailbox); err = mlx4_cmd_box(dev, 0, mailbox->dma, key, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c3e54b7e8780..fabfc9e0a948 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -256,8 +256,154 @@ static void dump_buf(void *buf, int size, int data_only, int offset) enum { MLX5_DRIVER_STATUS_ABORTED = 0xfe, + MLX5_DRIVER_SYND = 0xbadd00de, }; +static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, + u32 *synd, u8 *status) +{ + *synd = 0; + *status = 0; + + switch (op) { + case MLX5_CMD_OP_TEARDOWN_HCA: + case MLX5_CMD_OP_DISABLE_HCA: + case MLX5_CMD_OP_MANAGE_PAGES: + case MLX5_CMD_OP_DESTROY_MKEY: + case MLX5_CMD_OP_DESTROY_EQ: + case MLX5_CMD_OP_DESTROY_CQ: + case MLX5_CMD_OP_DESTROY_QP: + case MLX5_CMD_OP_DESTROY_PSV: + case MLX5_CMD_OP_DESTROY_SRQ: + case MLX5_CMD_OP_DESTROY_XRC_SRQ: + case MLX5_CMD_OP_DESTROY_DCT: + case MLX5_CMD_OP_DEALLOC_Q_COUNTER: + case MLX5_CMD_OP_DEALLOC_PD: + case MLX5_CMD_OP_DEALLOC_UAR: + case MLX5_CMD_OP_DETTACH_FROM_MCG: + case MLX5_CMD_OP_DEALLOC_XRCD: + case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: + case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: + case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: + case MLX5_CMD_OP_DESTROY_TIR: + case MLX5_CMD_OP_DESTROY_SQ: + case MLX5_CMD_OP_DESTROY_RQ: + case MLX5_CMD_OP_DESTROY_RMP: + case MLX5_CMD_OP_DESTROY_TIS: + case MLX5_CMD_OP_DESTROY_RQT: + case MLX5_CMD_OP_DESTROY_FLOW_TABLE: + case MLX5_CMD_OP_DESTROY_FLOW_GROUP: + case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: + return MLX5_CMD_STAT_OK; + + case MLX5_CMD_OP_QUERY_HCA_CAP: + case MLX5_CMD_OP_QUERY_ADAPTER: + case MLX5_CMD_OP_INIT_HCA: + case MLX5_CMD_OP_ENABLE_HCA: + case MLX5_CMD_OP_QUERY_PAGES: + case MLX5_CMD_OP_SET_HCA_CAP: + case MLX5_CMD_OP_QUERY_ISSI: + case MLX5_CMD_OP_SET_ISSI: + case MLX5_CMD_OP_CREATE_MKEY: + case MLX5_CMD_OP_QUERY_MKEY: + case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: + case MLX5_CMD_OP_PAGE_FAULT_RESUME: + case MLX5_CMD_OP_CREATE_EQ: + 
case MLX5_CMD_OP_QUERY_EQ: + case MLX5_CMD_OP_GEN_EQE: + case MLX5_CMD_OP_CREATE_CQ: + case MLX5_CMD_OP_QUERY_CQ: + case MLX5_CMD_OP_MODIFY_CQ: + case MLX5_CMD_OP_CREATE_QP: + case MLX5_CMD_OP_RST2INIT_QP: + case MLX5_CMD_OP_INIT2RTR_QP: + case MLX5_CMD_OP_RTR2RTS_QP: + case MLX5_CMD_OP_RTS2RTS_QP: + case MLX5_CMD_OP_SQERR2RTS_QP: + case MLX5_CMD_OP_2ERR_QP: + case MLX5_CMD_OP_2RST_QP: + case MLX5_CMD_OP_QUERY_QP: + case MLX5_CMD_OP_SQD_RTS_QP: + case MLX5_CMD_OP_INIT2INIT_QP: + case MLX5_CMD_OP_CREATE_PSV: + case MLX5_CMD_OP_CREATE_SRQ: + case MLX5_CMD_OP_QUERY_SRQ: + case MLX5_CMD_OP_ARM_RQ: + case MLX5_CMD_OP_CREATE_XRC_SRQ: + case MLX5_CMD_OP_QUERY_XRC_SRQ: + case MLX5_CMD_OP_ARM_XRC_SRQ: + case MLX5_CMD_OP_CREATE_DCT: + case MLX5_CMD_OP_DRAIN_DCT: + case MLX5_CMD_OP_QUERY_DCT: + case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: + case MLX5_CMD_OP_QUERY_VPORT_STATE: + case MLX5_CMD_OP_MODIFY_VPORT_STATE: + case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: + case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: + case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: + case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: + case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: + case MLX5_CMD_OP_SET_ROCE_ADDRESS: + case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: + case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: + case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: + case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: + case MLX5_CMD_OP_QUERY_VPORT_COUNTER: + case MLX5_CMD_OP_ALLOC_Q_COUNTER: + case MLX5_CMD_OP_QUERY_Q_COUNTER: + case MLX5_CMD_OP_ALLOC_PD: + case MLX5_CMD_OP_ALLOC_UAR: + case MLX5_CMD_OP_CONFIG_INT_MODERATION: + case MLX5_CMD_OP_ACCESS_REG: + case MLX5_CMD_OP_ATTACH_TO_MCG: + case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: + case MLX5_CMD_OP_MAD_IFC: + case MLX5_CMD_OP_QUERY_MAD_DEMUX: + case MLX5_CMD_OP_SET_MAD_DEMUX: + case MLX5_CMD_OP_NOP: + case MLX5_CMD_OP_ALLOC_XRCD: + case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: + case MLX5_CMD_OP_QUERY_CONG_STATUS: + case MLX5_CMD_OP_MODIFY_CONG_STATUS: + case MLX5_CMD_OP_QUERY_CONG_PARAMS: + case MLX5_CMD_OP_MODIFY_CONG_PARAMS: + case MLX5_CMD_OP_QUERY_CONG_STATISTICS: + case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: + case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: + case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: + case MLX5_CMD_OP_CREATE_TIR: + case MLX5_CMD_OP_MODIFY_TIR: + case MLX5_CMD_OP_QUERY_TIR: + case MLX5_CMD_OP_CREATE_SQ: + case MLX5_CMD_OP_MODIFY_SQ: + case MLX5_CMD_OP_QUERY_SQ: + case MLX5_CMD_OP_CREATE_RQ: + case MLX5_CMD_OP_MODIFY_RQ: + case MLX5_CMD_OP_QUERY_RQ: + case MLX5_CMD_OP_CREATE_RMP: + case MLX5_CMD_OP_MODIFY_RMP: + case MLX5_CMD_OP_QUERY_RMP: + case MLX5_CMD_OP_CREATE_TIS: + case MLX5_CMD_OP_MODIFY_TIS: + case MLX5_CMD_OP_QUERY_TIS: + case MLX5_CMD_OP_CREATE_RQT: + case MLX5_CMD_OP_MODIFY_RQT: + case MLX5_CMD_OP_QUERY_RQT: + case MLX5_CMD_OP_CREATE_FLOW_TABLE: + case MLX5_CMD_OP_QUERY_FLOW_TABLE: + case MLX5_CMD_OP_CREATE_FLOW_GROUP: + case MLX5_CMD_OP_QUERY_FLOW_GROUP: + case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: + case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: + *status = MLX5_DRIVER_STATUS_ABORTED; + *synd = MLX5_DRIVER_SYND; + return -EIO; + default: + mlx5_core_err(dev, "Unknown FW command (%d)\n", op); + return -EINVAL; + } +} + const char *mlx5_command_str(int command) { switch (command) { @@ -592,6 +738,16 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) return err; } +static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out) +{ + return &out->syndrome; +} + +static u8 *get_status_ptr(struct mlx5_outbox_hdr *out) +{ + return &out->status; +} + /* Notes: * 1. Callback functions may not sleep * 2. 
page queue commands do not support asynchrous completion @@ -1200,6 +1356,11 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, return msg; } +static u16 opcode_from_in(struct mlx5_inbox_hdr *in) +{ + return be16_to_cpu(in->opcode); +} + static int is_manage_pages(struct mlx5_inbox_hdr *in) { return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; @@ -1214,6 +1375,15 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, gfp_t gfp; int err; u8 status = 0; + u32 drv_synd; + + if (pci_channel_offline(dev->pdev) || + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status); + *get_synd_ptr(out) = cpu_to_be32(drv_synd); + *get_status_ptr(out) = status; + return err; + } pages_queue = is_manage_pages(in); gfp = callback ? GFP_ATOMIC : GFP_KERNEL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c index e71563ce05d1..22d603f78273 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c @@ -598,6 +598,8 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) return; priv->vlan.filter_disabled = false; + if (priv->netdev->flags & IFF_PROMISC) + return; mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } @@ -607,6 +609,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) return; priv->vlan.filter_disabled = true; + if (priv->netdev->flags & IFF_PROMISC) + return; mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } @@ -717,8 +721,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; - if (enable_promisc) + if (enable_promisc) { mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); + if (!priv->vlan.filter_disabled) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } if (enable_allmulti) mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); if (enable_broadcast) @@ -730,8 +738,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); if (disable_allmulti) mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); - if (disable_promisc) + if (disable_promisc) { + if (!priv->vlan.filter_disabled) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + } ea->promisc_enabled = promisc_enabled; ea->allmulti_enabled = allmulti_enabled; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9b81e1ceb8de..f5deb642d0d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -34,6 +34,7 @@ #include <linux/module.h> #include <linux/random.h> #include <linux/vmalloc.h> +#include <linux/hardirq.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" @@ -57,6 +58,91 @@ enum { MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10 }; +enum { + MLX5_NIC_IFC_FULL = 0, + MLX5_NIC_IFC_DISABLED = 1, + MLX5_NIC_IFC_NO_DRAM_NIC = 2 +}; + +static u8 get_nic_interface(struct mlx5_core_dev *dev) +{ + return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; +} + +static void trigger_cmd_completions(struct mlx5_core_dev *dev) +{ + unsigned long flags; + u64 vector; + + /* wait for 
pending handlers to complete */ + synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector); + spin_lock_irqsave(&dev->cmd.alloc_lock, flags); + vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); + if (!vector) + goto no_trig; + + vector |= MLX5_TRIGGERED_CMD_COMP; + spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); + + mlx5_core_dbg(dev, "vector 0x%llx\n", vector); + mlx5_cmd_comp_handler(dev, vector); + return; + +no_trig: + spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); +} + +static int in_fatal(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + struct health_buffer __iomem *h = health->health; + + if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED) + return 1; + + if (ioread32be(&h->fw_ver) == 0xffffffff) + return 1; + + return 0; +} + +void mlx5_enter_error_state(struct mlx5_core_dev *dev) +{ + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + return; + + mlx5_core_err(dev, "start\n"); + if (pci_channel_offline(dev->pdev) || in_fatal(dev)) + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + + mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); + mlx5_core_err(dev, "end\n"); +} + +static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) +{ + u8 nic_interface = get_nic_interface(dev); + + switch (nic_interface) { + case MLX5_NIC_IFC_FULL: + mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n"); + break; + + case MLX5_NIC_IFC_DISABLED: + mlx5_core_warn(dev, "starting teardown\n"); + break; + + case MLX5_NIC_IFC_NO_DRAM_NIC: + mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n"); + break; + default: + mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n", + nic_interface); + } + + mlx5_disable_device(dev); +} + static void health_care(struct work_struct *work) { struct mlx5_core_health *health; @@ -67,6 +153,7 @@ static void health_care(struct work_struct *work) priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); mlx5_core_warn(dev, "handling bad device here\n"); + mlx5_handle_bad_state(dev); } static const char *hsynd_str(u8 synd) @@ -122,6 +209,10 @@ static void print_health_info(struct mlx5_core_dev *dev) u32 fw; int i; + /* If the syndrom is 0, the device is OK and no need to print buffer */ + if (!ioread8(&h->synd)) + return; + for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i)); @@ -136,13 +227,29 @@ static void print_health_info(struct mlx5_core_dev *dev) dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); } +static unsigned long get_next_poll_jiffies(void) +{ + unsigned long next; + + get_random_bytes(&next, sizeof(next)); + next %= HZ; + next += jiffies + MLX5_HEALTH_POLL_INTERVAL; + + return next; +} + static void poll_health(unsigned long data) { struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data; struct mlx5_core_health *health = &dev->priv.health; - unsigned long next; u32 count; + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + trigger_cmd_completions(dev); + mod_timer(&health->timer, get_next_poll_jiffies()); + return; + } + count = ioread32be(health->health_counter); if (count == health->prev) ++health->miss_counter; @@ -151,14 +258,16 @@ static void poll_health(unsigned long data) health->prev = count; if (health->miss_counter == MAX_MISSES) { - mlx5_core_err(dev, "device's health compromised\n"); + dev_err(&dev->pdev->dev, "device's health compromised - 
reached miss count\n"); print_health_info(dev); - queue_work(health->wq, &health->work); } else { - get_random_bytes(&next, sizeof(next)); - next %= HZ; - next += jiffies + MLX5_HEALTH_POLL_INTERVAL; - mod_timer(&health->timer, next); + mod_timer(&health->timer, get_next_poll_jiffies()); + } + + if (in_fatal(dev) && !health->sick) { + health->sick = true; + print_health_info(dev); + queue_work(health->wq, &health->work); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b6edc58766ad..2388aec208fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -39,12 +39,14 @@ #include <linux/slab.h> #include <linux/io-mapping.h> #include <linux/interrupt.h> +#include <linux/delay.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cq.h> #include <linux/mlx5/qp.h> #include <linux/mlx5/srq.h> #include <linux/debugfs.h> #include <linux/kmod.h> +#include <linux/delay.h> #include <linux/mlx5/mlx5_ifc.h> #include "mlx5_core.h" @@ -151,6 +153,25 @@ static struct mlx5_profile profile[] = { }, }; +#define FW_INIT_TIMEOUT_MILI 2000 +#define FW_INIT_WAIT_MS 2 + +static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) +{ + unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili); + int err = 0; + + while (fw_initializing(dev)) { + if (time_after(jiffies, end)) { + err = -EBUSY; + break; + } + msleep(FW_INIT_WAIT_MS); + } + + return err; +} + static int set_dma_caps(struct pci_dev *pdev) { int err; @@ -181,6 +202,34 @@ static int set_dma_caps(struct pci_dev *pdev) return err; } +static int mlx5_pci_enable_device(struct mlx5_core_dev *dev) +{ + struct pci_dev *pdev = dev->pdev; + int err = 0; + + mutex_lock(&dev->pci_status_mutex); + if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) { + err = pci_enable_device(pdev); + if (!err) + dev->pci_status = MLX5_PCI_STATUS_ENABLED; + } + mutex_unlock(&dev->pci_status_mutex); + + return err; +} + +static void mlx5_pci_disable_device(struct mlx5_core_dev *dev) +{ + struct pci_dev *pdev = dev->pdev; + + mutex_lock(&dev->pci_status_mutex); + if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) { + pci_disable_device(pdev); + dev->pci_status = MLX5_PCI_STATUS_DISABLED; + } + mutex_unlock(&dev->pci_status_mutex); +} + static int request_bar(struct pci_dev *pdev) { int err = 0; @@ -807,7 +856,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) if (!priv->dbg_root) return -ENOMEM; - err = pci_enable_device(pdev); + err = mlx5_pci_enable_device(dev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); goto err_dbg; @@ -841,7 +890,7 @@ err_clr_master: pci_clear_master(dev->pdev); release_bar(dev->pdev); err_disable: - pci_disable_device(dev->pdev); + mlx5_pci_disable_device(dev); err_dbg: debugfs_remove(priv->dbg_root); @@ -853,7 +902,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv) iounmap(dev->iseg); pci_clear_master(dev->pdev); release_bar(dev->pdev); - pci_disable_device(dev->pdev); + mlx5_pci_disable_device(dev); debugfs_remove(priv->dbg_root); } @@ -863,13 +912,32 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) struct pci_dev *pdev = dev->pdev; int err; + mutex_lock(&dev->intf_state_mutex); + if (dev->interface_state == MLX5_INTERFACE_STATE_UP) { + dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", + __func__); + goto out; + } + dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), 
fw_rev_sub(dev)); + /* on load removing any previous indication of internal error, device is + * up + */ + dev->state = MLX5_DEVICE_STATE_UP; + err = mlx5_cmd_init(dev); if (err) { dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); - return err; + goto out_err; + } + + err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI); + if (err) { + dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n", + FW_INIT_TIMEOUT_MILI); + goto out_err; } mlx5_pagealloc_init(dev); @@ -994,6 +1062,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) if (err) pr_info("failed request module on %s\n", MLX5_IB_MOD); + dev->interface_state = MLX5_INTERFACE_STATE_UP; +out: + mutex_unlock(&dev->intf_state_mutex); + return 0; err_reg_dev: @@ -1024,7 +1096,7 @@ err_stop_poll: mlx5_stop_health_poll(dev); if (mlx5_cmd_teardown_hca(dev)) { dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); - return err; + goto out_err; } err_pagealloc_stop: @@ -1040,13 +1112,23 @@ err_pagealloc_cleanup: mlx5_pagealloc_cleanup(dev); mlx5_cmd_cleanup(dev); +out_err: + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + mutex_unlock(&dev->intf_state_mutex); + return err; } static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { - int err; + int err = 0; + mutex_lock(&dev->intf_state_mutex); + if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { + dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", + __func__); + goto out; + } mlx5_unregister_device(dev); mlx5_cleanup_mr_table(dev); mlx5_cleanup_srq_table(dev); @@ -1072,10 +1154,12 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_cmd_cleanup(dev); out: + dev->interface_state = MLX5_INTERFACE_STATE_DOWN; + mutex_unlock(&dev->intf_state_mutex); return err; } -static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, +void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param) { struct mlx5_priv *priv = &dev->priv; @@ -1125,6 +1209,8 @@ static int init_one(struct pci_dev *pdev, INIT_LIST_HEAD(&priv->ctx_list); spin_lock_init(&priv->ctx_lock); + mutex_init(&dev->pci_status_mutex); + mutex_init(&dev->intf_state_mutex); err = mlx5_pci_init(dev, priv); if (err) { dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err); @@ -1172,6 +1258,112 @@ static void remove_one(struct pci_dev *pdev) kfree(dev); } +static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_priv *priv = &dev->priv; + + dev_info(&pdev->dev, "%s was called\n", __func__); + mlx5_enter_error_state(dev); + mlx5_unload_one(dev, priv); + mlx5_pci_disable_device(dev); + return state == pci_channel_io_perm_failure ? + PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + int err = 0; + + dev_info(&pdev->dev, "%s was called\n", __func__); + + err = mlx5_pci_enable_device(dev); + if (err) { + dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n" + , __func__, err); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + return err ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; +} + +void mlx5_disable_device(struct mlx5_core_dev *dev) +{ + mlx5_pci_err_detected(dev->pdev, 0); +} + +/* wait for the device to show vital signs. For now we check + * that we can read the device ID and that the health buffer + * shows a non zero value which is different than 0xffffffff + */ +static void wait_vital(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_core_health *health = &dev->priv.health; + const int niter = 100; + u32 count; + u16 did; + int i; + + /* Wait for firmware to be ready after reset */ + msleep(1000); + for (i = 0; i < niter; i++) { + if (pci_read_config_word(pdev, 2, &did)) { + dev_warn(&pdev->dev, "failed reading config word\n"); + break; + } + if (did == pdev->device) { + dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i); + break; + } + msleep(50); + } + if (i == niter) + dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__); + + for (i = 0; i < niter; i++) { + count = ioread32be(health->health_counter); + if (count && count != 0xffffffff) { + dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); + break; + } + msleep(50); + } + + if (i == niter) + dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__); +} + +static void mlx5_pci_resume(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_priv *priv = &dev->priv; + int err; + + dev_info(&pdev->dev, "%s was called\n", __func__); + + pci_save_state(pdev); + wait_vital(pdev); + + err = mlx5_load_one(dev, priv); + if (err) + dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n" + , __func__, err); + else + dev_info(&pdev->dev, "%s: device recovered\n", __func__); +} + +static const struct pci_error_handlers mlx5_err_handler = { + .error_detected = mlx5_pci_err_detected, + .slot_reset = mlx5_pci_slot_reset, + .resume = mlx5_pci_resume +}; + static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */ @@ -1188,7 +1380,8 @@ static struct pci_driver mlx5_core_driver = { .name = DRIVER_NAME, .id_table = mlx5_core_pci_table, .probe = init_one, - .remove = remove_one + .remove = remove_one, + .err_handler = &mlx5_err_handler }; static int __init init(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 30c0be721b08..cee5b7a839bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -86,6 +86,10 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); +void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, + unsigned long param); +void mlx5_enter_error_state(struct mlx5_core_dev *dev); +void mlx5_disable_device(struct mlx5_core_dev *dev); void mlx5e_init(void); void mlx5e_cleanup(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 76432a510ac2..1cda5d268ec9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -493,15 +493,20 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) struct fw_page *fwp; 
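Both wait_fw_init() earlier in main.c and wait_vital() here follow the same bounded-poll shape: test a readiness condition, sleep a short interval, give up once a fixed time budget is spent. As a side illustration only (not part of the patch), the sketch below reduces that shape to standalone user-space C; fake_ready() stands in for fw_initializing() or the health-counter read, usleep() for msleep(), and the jiffies deadline is simplified to an accumulated millisecond count.

#include <stdio.h>
#include <unistd.h>

static int fake_ready(int iteration)
{
        return iteration >= 5;          /* pretend the device becomes ready on the 6th poll */
}

static int wait_ready(unsigned int timeout_ms, unsigned int interval_ms)
{
        unsigned int waited = 0;
        int i = 0;

        while (!fake_ready(i++)) {
                if (waited >= timeout_ms)
                        return -1;      /* the driver returns -EBUSY or warns at this point */
                usleep(interval_ms * 1000);
                waited += interval_ms;
        }
        return 0;
}

int main(void)
{
        /* same budget as FW_INIT_TIMEOUT_MILI / FW_INIT_WAIT_MS above */
        printf("wait_ready: %d\n", wait_ready(2000, 2));
        return 0;
}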
struct rb_node *p; int nclaimed = 0; - int err; + int err = 0; do { p = rb_first(&dev->priv.page_root); if (p) { fwp = rb_entry(p, struct fw_page, rb_node); - err = reclaim_pages(dev, fwp->func_id, - optimal_reclaimed_pages(), - &nclaimed); + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + free_4k(dev, fwp->addr); + nclaimed = 1; + } else { + err = reclaim_pages(dev, fwp->func_id, + optimal_reclaimed_pages(), + &nclaimed); + } if (err) { mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index ae302614e74b..a87e773e93f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -302,7 +302,7 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, u32 in[MLX5_ST_SZ_DW(pvlc_reg)]; memset(in, 0, sizeof(in)); - MLX5_SET(ptys_reg, in, local_port, local_port); + MLX5_SET(pvlc_reg, in, local_port, local_port); return mlx5_core_access_reg(dev, in, sizeof(in), pvlc, pvlc_size, MLX5_REG_PVLC, 0, 0); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 2941d9c5ae48..e36e12219c9b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -30,3 +30,14 @@ config MLXSW_SWITCHX2 To compile this driver as a module, choose M here: the module will be called mlxsw_switchx2. + +config MLXSW_SPECTRUM + tristate "Mellanox Technologies Spectrum support" + depends on MLXSW_CORE && NET_SWITCHDEV + default m + ---help--- + This driver supports Mellanox Technologies Spectrum Ethernet + Switch ASICs. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_spectrum. diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 0a05f65ee814..af015818fd19 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -4,3 +4,6 @@ obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o mlxsw_pci-objs := pci.o obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o mlxsw_switchx2-objs := switchx2.o +obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o +mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ + spectrum_switchdev.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 770db17eb03f..cd63b8263688 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -464,6 +464,8 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8); * passed in this command must be pinned. */ +#define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32 + static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core, char *in_mbox, u32 vpm_entries_count) { @@ -568,7 +570,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1); */ MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1); -/* cmd_mbox_config_profile_set_fid_based +/* cmd_mbox_config_profile_set_flood_mode * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ @@ -649,12 +651,8 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12); MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16); /* cmd_mbox_config_profile_max_flood_tables - * Maximum number of Flooding Tables. Flooding Tables are associated to - * the different packet types for the different switch partitions. 
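The MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX limit added to cmd.h above, together with the reworked mlxsw_pci_fw_area_init() further down in pci.c, means firmware pages are no longer mapped with one oversized MAP_FA command: at most 32 VPM entries are filled per mailbox, the command is issued, and a final command flushes any remainder. A minimal standalone sketch of that chunking (illustration only; submit_chunk() is a hypothetical stand-in for mlxsw_cmd_map_fa() and error handling is omitted):

#include <stdio.h>

#define VPM_ENTRIES_MAX 32

static void submit_chunk(int nent)
{
        printf("MAP_FA with %d entries\n", nent);
}

static void map_pages(int num_pages)
{
        int nent = 0;
        int i;

        for (i = 0; i < num_pages; i++) {
                /* record page i in mailbox slot nent ... */
                if (++nent == VPM_ENTRIES_MAX) {
                        submit_chunk(nent);
                        nent = 0;               /* start a fresh mailbox */
                }
        }
        if (nent)                               /* flush the partial last chunk */
                submit_chunk(nent);
}

int main(void)
{
        map_pages(70);          /* -> chunks of 32, 32 and 6 entries */
        return 0;
}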
- * Note that the table size depends on the fid_based mode. - * In SwitchX silicon, tables are split equally between the switch - * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated - * with swid-1 and the last 4 are associated with swid-2. + * Maximum number of single-entry flooding tables. Different flooding tables + * can be associated with different packet types. */ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4); @@ -665,15 +663,42 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4); */ MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4); -/* cmd_mbox_config_profile_fid_based - * FID Based Flood Mode - * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID - * 01 Use FID to offset the index to the Port Group Table (pgi) - * 10 Use FID to offset the index to the Port Group Table (pgi) and - * the Multicast ID +/* cmd_mbox_config_profile_flood_mode + * Flooding mode to use. + * 0-2 - Backward compatible modes for SwitchX devices. + * 3 - Mixed mode, where: + * max_flood_tables indicates the number of single-entry tables. + * max_vid_flood_tables indicates the number of per-VID tables. + * max_fid_offset_flood_tables indicates the number of FID-offset tables. + * max_fid_flood_tables indicates the number of per-FID tables. */ MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2); +/* cmd_mbox_config_profile_max_fid_offset_flood_tables + * Maximum number of FID-offset flooding tables. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, + max_fid_offset_flood_tables, 0x34, 24, 4); + +/* cmd_mbox_config_profile_fid_offset_flood_table_size + * The size (number of entries) of each FID-offset flood table. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, + fid_offset_flood_table_size, 0x34, 0, 16); + +/* cmd_mbox_config_profile_max_fid_flood_tables + * Maximum number of per-FID flooding tables. + * + * Note: This flooding tables cover special FIDs only (vFIDs), starting at + * FID value 4K and higher. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_fid_flood_tables, 0x38, 24, 4); + +/* cmd_mbox_config_profile_fid_flood_table_size + * The size (number of entries) of each per-FID table. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, fid_flood_table_size, 0x38, 0, 16); + /* cmd_mbox_config_profile_max_ib_mc * Maximum number of multicast FDB records for InfiniBand * FDB (in 512 chunks) per InfiniBand switch partition. 
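With flood_mode set to 3 ("mixed"), the four count/size fields documented above partition the flooding resources by table kind: plain single-entry tables, per-VID tables, FID-offset tables and per-FID (vFID) tables, the last two with their own table sizes. The snippet below is a hypothetical illustration of how the fields relate; the struct is a hand-reduced mirror of struct mlxsw_config_profile and the numbers are examples, not the Spectrum driver's defaults.

struct flood_profile {
        unsigned char  flood_mode;                      /* 3 = mixed mode */
        unsigned char  max_flood_tables;                /* single-entry tables */
        unsigned char  max_vid_flood_tables;            /* per-VID tables */
        unsigned char  max_fid_offset_flood_tables;     /* FID-offset tables */
        unsigned short fid_offset_flood_table_size;
        unsigned char  max_fid_flood_tables;            /* per-FID (vFID) tables */
        unsigned short fid_flood_table_size;
};

static const struct flood_profile example_mixed_profile = {
        .flood_mode                     = 3,
        .max_flood_tables               = 2,
        .max_vid_flood_tables           = 1,
        .max_fid_offset_flood_tables    = 2,
        .fid_offset_flood_table_size    = 512,
        .max_fid_flood_tables           = 2,
        .fid_flood_table_size           = 4096,
};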
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index dbcaf5df8967..bd80ac714a8a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -374,26 +374,31 @@ static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, int err; int ret; + mlxsw_core->emad.trans_active = true; + err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info); if (err) { dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", mlxsw_core->emad.tid); dev_kfree_skb(skb); - return err; + goto trans_inactive_out; } - mlxsw_core->emad.trans_active = true; ret = wait_event_timeout(mlxsw_core->emad.wait, !(mlxsw_core->emad.trans_active), msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS)); if (!ret) { dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n", mlxsw_core->emad.tid); - mlxsw_core->emad.trans_active = false; - return -EIO; + err = -EIO; + goto trans_inactive_out; } return 0; + +trans_inactive_out: + mlxsw_core->emad.trans_active = false; + return err; } static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, @@ -506,7 +511,6 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core) return err; mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, - MLXSW_REG_HTGT_TRAP_GROUP_EMAD, MLXSW_TRAP_ID_ETHEMAD); return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); } @@ -551,8 +555,8 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) { char hpkt_pl[MLXSW_REG_HPKT_LEN]; + mlxsw_core->emad.use_emad = false; mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, - MLXSW_REG_HTGT_TRAP_GROUP_EMAD, MLXSW_TRAP_ID_ETHEMAD); mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 165808471188..807827350a89 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -54,6 +54,7 @@ MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind) #define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2" +#define MLXSW_DEVICE_KIND_SPECTRUM "spectrum" struct mlxsw_core; struct mlxsw_driver; @@ -153,6 +154,10 @@ struct mlxsw_config_profile { u8 max_flood_tables; u8 max_vid_flood_tables; u8 flood_mode; + u8 max_fid_offset_flood_tables; + u16 fid_offset_flood_table_size; + u8 max_fid_flood_tables; + u16 fid_flood_table_size; u16 max_ib_mc; u16 max_pkey; u8 ar_sec; diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index ffd55d030ce2..a94dbda6590b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -171,15 +171,21 @@ static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item, } static inline void __mlxsw_item_memcpy_from(char *buf, char *dst, - struct mlxsw_item *item) + struct mlxsw_item *item, + unsigned short index) { - memcpy(dst, &buf[item->offset], item->size.bytes); + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); + + memcpy(dst, &buf[offset], item->size.bytes); } -static inline void __mlxsw_item_memcpy_to(char *buf, char *src, - struct mlxsw_item *item) +static inline void __mlxsw_item_memcpy_to(char *buf, const char *src, + struct mlxsw_item *item, + unsigned short index) { - memcpy(&buf[item->offset], src, item->size.bytes); + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); + + memcpy(&buf[offset], src, item->size.bytes); } static inline u16 @@ -187,6 +193,7 
@@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift) { u16 max_index, be_index; u16 offset; /* byte offset inside the array */ + u8 in_byte_index; BUG_ON(index && !item->element_size); if (item->offset % sizeof(u32) != 0 || @@ -199,7 +206,8 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift) max_index = (item->size.bytes << 3) / item->element_size - 1; be_index = max_index - index; offset = be_index * item->element_size >> 3; - *shift = index % (BITS_PER_BYTE / item->element_size) << 1; + in_byte_index = index % (BITS_PER_BYTE / item->element_size); + *shift = in_byte_index * item->element_size; return item->offset + offset; } @@ -371,12 +379,40 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ static inline void \ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \ { \ - __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\ + __mlxsw_item_memcpy_from(buf, dst, \ + &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \ +{ \ + __mlxsw_item_memcpy_to(buf, src, \ + &__ITEM_NAME(_type, _cname, _iname), 0); \ +} + +#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \ + _step, _instepoffset) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .size = {.bytes = _sizebytes,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, \ + unsigned short index, \ + char *dst) \ +{ \ + __mlxsw_item_memcpy_from(buf, dst, \ + &__ITEM_NAME(_type, _cname, _iname), index); \ } \ static inline void \ -mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \ + unsigned short index, \ + const char *src) \ { \ - __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \ + __mlxsw_item_memcpy_to(buf, src, \ + &__ITEM_NAME(_type, _cname, _iname), index); \ } #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 462cea31ecbb..371ea3f56aed 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -57,6 +57,7 @@ static const char mlxsw_pci_driver_name[] = "mlxsw_pci"; static const struct pci_device_id mlxsw_pci_id_table[] = { {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0}, + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, {0, } }; @@ -67,6 +68,8 @@ static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id) switch (id->device) { case PCI_DEVICE_ID_MELLANOX_SWITCHX2: return MLXSW_DEVICE_KIND_SWITCHX2; + case PCI_DEVICE_ID_MELLANOX_SPECTRUM: + return MLXSW_DEVICE_KIND_SPECTRUM; default: BUG(); } @@ -171,8 +174,8 @@ struct mlxsw_pci { struct msix_entry msix_entry; struct mlxsw_core *core; struct { - u16 num_pages; struct mlxsw_pci_mem_item *items; + unsigned int count; } fw_area; struct { struct mlxsw_pci_mem_item out_mbox; @@ -431,8 +434,7 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe, mapaddr = pci_map_single(pdev, frag_data, frag_len, direction); if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) { - if (net_ratelimit()) - dev_err(&pdev->dev, "failed to dma map tx frag\n"); + 
dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n"); return -EIO; } mlxsw_pci_wqe_address_set(wqe, index, mapaddr); @@ -497,6 +499,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { struct mlxsw_pci_queue_elem_info *elem_info; + u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci); int i; int err; @@ -504,9 +507,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, q->consumer_counter = 0; /* Set CQ of same number of this RDQ with base - * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs. + * above SDQ count as the lower ones are assigned to SDQs. */ - mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT); + mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); @@ -699,8 +702,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, put_new_skb: memset(wqe, 0, q->elem_size); err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); - if (err && net_ratelimit()) - dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n"); + if (err) + dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); /* Everything is set up, ring doorbell to pass elem to HW */ q->producer_counter++; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); @@ -830,7 +833,8 @@ static void mlxsw_pci_eq_tasklet(unsigned long data) { struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data; struct mlxsw_pci *mlxsw_pci = q->pci; - unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)]; + u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci); + unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)]; char *eqe; u8 cqn; bool cq_handle = false; @@ -866,7 +870,7 @@ static void mlxsw_pci_eq_tasklet(unsigned long data) if (!cq_handle) return; - for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) { + for_each_set_bit(cqn, active_cqns, cq_count) { q = mlxsw_pci_cq_get(mlxsw_pci, cqn); mlxsw_pci_queue_tasklet_schedule(q); } @@ -1067,10 +1071,8 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox); eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox); - if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) || - (num_rdqs != MLXSW_PCI_RDQS_COUNT) || - (num_cqs != MLXSW_PCI_CQS_COUNT) || - (num_eqs != MLXSW_PCI_EQS_COUNT)) { + if (num_sdqs + num_rdqs > num_cqs || + num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) { dev_err(&pdev->dev, "Unsupported number of queues\n"); return -EINVAL; } @@ -1215,6 +1217,14 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, mbox, profile->max_flood_tables); mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set( mbox, profile->max_vid_flood_tables); + mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set( + mbox, profile->max_fid_offset_flood_tables); + mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set( + mbox, profile->fid_offset_flood_table_size); + mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set( + mbox, profile->max_fid_flood_tables); + mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set( + mbox, profile->fid_flood_table_size); } if (profile->used_flood_mode) { mlxsw_cmd_mbox_config_profile_set_flood_mode_set( @@ -1272,6 +1282,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, u16 num_pages) { struct mlxsw_pci_mem_item *mem_item; + int 
nent = 0; int i; int err; @@ -1279,7 +1290,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, GFP_KERNEL); if (!mlxsw_pci->fw_area.items) return -ENOMEM; - mlxsw_pci->fw_area.num_pages = num_pages; + mlxsw_pci->fw_area.count = num_pages; mlxsw_cmd_mbox_zero(mbox); for (i = 0; i < num_pages; i++) { @@ -1293,13 +1304,22 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, err = -ENOMEM; goto err_alloc; } - mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr); - mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */ + mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr); + mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */ + if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) { + err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); + if (err) + goto err_cmd_map_fa; + nent = 0; + mlxsw_cmd_mbox_zero(mbox); + } } - err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages); - if (err) - goto err_cmd_map_fa; + if (nent) { + err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); + if (err) + goto err_cmd_map_fa; + } return 0; @@ -1322,7 +1342,7 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci) mlxsw_cmd_unmap_fa(mlxsw_pci->core); - for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) { + for (i = 0; i < mlxsw_pci->fw_area.count; i++) { mem_item = &mlxsw_pci->fw_area.items[i]; pci_free_consistent(mlxsw_pci->pdev, mem_item->size, @@ -1582,11 +1602,11 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, if (in_mbox) memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size); - mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32); - mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr); + mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr)); + mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr)); - mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32); - mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr); + mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr)); + mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr)); mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod); mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 1ef9664b4512..142f33d978c5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -40,6 +40,7 @@ #include "item.h" #define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 +#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84 #define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */ #define MLXSW_PCI_PAGE_SIZE 4096 @@ -71,9 +72,7 @@ #define MLXSW_PCI_DOORBELL(offset, type_offset, num) \ ((offset) + (type_offset) + (num) * 4) -#define MLXSW_PCI_RDQS_COUNT 24 -#define MLXSW_PCI_SDQS_COUNT 24 -#define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT) +#define MLXSW_PCI_CQS_MAX 96 #define MLXSW_PCI_EQS_COUNT 2 #define MLXSW_PCI_EQ_ASYNC_NUM 0 #define MLXSW_PCI_EQ_COMP_NUM 1 diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 096e1c12175a..4fcba46bbae0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -99,57 +99,6 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = { */ MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); -/* SMID - Switch Multicast ID - * -------------------------- - * In multi-chip configuration, 
each device should maintain mapping between - * Multicast ID (MID) into a list of local ports. This mapping is used in all - * the devices other than the ingress device, and is implemented as part of the - * FDB. The MID record maps from a MID, which is a unique identi- fier of the - * multicast group within the stacking domain, into a list of local ports into - * which the packet is replicated. - */ -#define MLXSW_REG_SMID_ID 0x2007 -#define MLXSW_REG_SMID_LEN 0x420 - -static const struct mlxsw_reg_info mlxsw_reg_smid = { - .id = MLXSW_REG_SMID_ID, - .len = MLXSW_REG_SMID_LEN, -}; - -/* reg_smid_swid - * Switch partition ID. - * Access: Index - */ -MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8); - -/* reg_smid_mid - * Multicast identifier - global identifier that represents the multicast group - * across all devices - * Access: Index - */ -MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16); - -/* reg_smid_port - * Local port memebership (1 bit per port). - * Access: RW - */ -MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1); - -/* reg_smid_port_mask - * Local port mask (1 bit per port). - * Access: W - */ -MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1); - -static inline void mlxsw_reg_smid_pack(char *payload, u16 mid) -{ - MLXSW_REG_ZERO(smid, payload); - mlxsw_reg_smid_swid_set(payload, 0); - mlxsw_reg_smid_mid_set(payload, mid); - mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1); - mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1); -} - /* SSPR - Switch System Port Record Register * ----------------------------------------- * Configures the system port to local port mapping. @@ -208,11 +157,359 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port) mlxsw_reg_sspr_system_port_set(payload, local_port); } +/* SFDAT - Switch Filtering Database Aging Time + * -------------------------------------------- + * Controls the Switch aging time. Aging time is able to be set per Switch + * Partition. + */ +#define MLXSW_REG_SFDAT_ID 0x2009 +#define MLXSW_REG_SFDAT_LEN 0x8 + +static const struct mlxsw_reg_info mlxsw_reg_sfdat = { + .id = MLXSW_REG_SFDAT_ID, + .len = MLXSW_REG_SFDAT_LEN, +}; + +/* reg_sfdat_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8); + +/* reg_sfdat_age_time + * Aging time in seconds + * Min - 10 seconds + * Max - 1,000,000 seconds + * Default is 300 seconds. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20); + +static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time) +{ + MLXSW_REG_ZERO(sfdat, payload); + mlxsw_reg_sfdat_swid_set(payload, 0); + mlxsw_reg_sfdat_age_time_set(payload, age_time); +} + +/* SFD - Switch Filtering Database + * ------------------------------- + * The following register defines the access to the filtering database. + * The register supports querying, adding, removing and modifying the database. + * The access is optimized for bulk updates in which case more than one + * FDB record is present in the same command. + */ +#define MLXSW_REG_SFD_ID 0x200A +#define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */ +#define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */ +#define MLXSW_REG_SFD_REC_MAX_COUNT 64 +#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \ + MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT) + +static const struct mlxsw_reg_info mlxsw_reg_sfd = { + .id = MLXSW_REG_SFD_ID, + .len = MLXSW_REG_SFD_LEN, +}; + +/* reg_sfd_swid + * Switch partition ID for queries. Reserved on Write. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8); + +enum mlxsw_reg_sfd_op { + /* Dump entire FDB a (process according to record_locator) */ + MLXSW_REG_SFD_OP_QUERY_DUMP = 0, + /* Query records by {MAC, VID/FID} value */ + MLXSW_REG_SFD_OP_QUERY_QUERY = 1, + /* Query and clear activity. Query records by {MAC, VID/FID} value */ + MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2, + /* Test. Response indicates if each of the records could be + * added to the FDB. + */ + MLXSW_REG_SFD_OP_WRITE_TEST = 0, + /* Add/modify. Aged-out records cannot be added. This command removes + * the learning notification of the {MAC, VID/FID}. Response includes + * the entries that were added to the FDB. + */ + MLXSW_REG_SFD_OP_WRITE_EDIT = 1, + /* Remove record by {MAC, VID/FID}. This command also removes + * the learning notification and aged-out notifications + * of the {MAC, VID/FID}. The response provides current (pre-removal) + * entries as non-aged-out. + */ + MLXSW_REG_SFD_OP_WRITE_REMOVE = 2, + /* Remove learned notification by {MAC, VID/FID}. The response provides + * the removed learning notification. + */ + MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2, +}; + +/* reg_sfd_op + * Operation. + * Access: OP + */ +MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2); + +/* reg_sfd_record_locator + * Used for querying the FDB. Use record_locator=0 to initiate the + * query. When a record is returned, a new record_locator is + * returned to be used in the subsequent query. + * Reserved for database update. + * Access: Index + */ +MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30); + +/* reg_sfd_num_rec + * Request: Number of records to read/add/modify/remove + * Response: Number of records read/added/replaced/removed + * See above description for more details. + * Ranges 0..64 + * Access: RW + */ +MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8); + +static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op, + u32 record_locator) +{ + MLXSW_REG_ZERO(sfd, payload); + mlxsw_reg_sfd_op_set(payload, op); + mlxsw_reg_sfd_record_locator_set(payload, record_locator); +} + +/* reg_sfd_rec_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8, + MLXSW_REG_SFD_REC_LEN, 0x00, false); + +enum mlxsw_reg_sfd_rec_type { + MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0, +}; + +/* reg_sfd_rec_type + * FDB record type. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4, + MLXSW_REG_SFD_REC_LEN, 0x00, false); + +enum mlxsw_reg_sfd_rec_policy { + /* Replacement disabled, aging disabled. */ + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0, + /* (mlag remote): Replacement enabled, aging disabled, + * learning notification enabled on this port. + */ + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1, + /* (ingress device): Replacement enabled, aging enabled. */ + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3, +}; + +/* reg_sfd_rec_policy + * Policy. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2, + MLXSW_REG_SFD_REC_LEN, 0x00, false); + +/* reg_sfd_rec_a + * Activity. Set for new static entries. Set for static entries if a frame SMAC + * lookup hits on the entry. + * To clear the a bit, use "query and clear activity" op. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1, + MLXSW_REG_SFD_REC_LEN, 0x00, false); + +/* reg_sfd_rec_mac + * MAC address. 
+ * Access: Index + */ +MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6, + MLXSW_REG_SFD_REC_LEN, 0x02); + +enum mlxsw_reg_sfd_rec_action { + /* forward */ + MLXSW_REG_SFD_REC_ACTION_NOP = 0, + /* forward and trap, trap_id is FDB_TRAP */ + MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1, + /* trap and do not forward, trap_id is FDB_TRAP */ + MLXSW_REG_SFD_REC_ACTION_TRAP = 3, + MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15, +}; + +/* reg_sfd_rec_action + * Action to apply on the packet. + * Note: Dynamic entries can only be configured with NOP action. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +/* reg_sfd_uc_sub_port + * LAG sub port. + * Must be 0 if multichannel VEPA is not enabled. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_fid_vid + * Filtering ID or VLAN ID + * For SwitchX and SwitchX-2: + * - Dynamic entries (policy 2,3) use FID + * - Static entries (policy 0) use VID + * - When independent learning is configured, VID=FID + * For Spectrum: use FID for both Dynamic and Static entries. + * VID should not be used. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_system_port + * Unique port identifier for the final destination of the packet. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 vid, + enum mlxsw_reg_sfd_rec_action action, + u8 local_port) +{ + u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload); + + if (rec_index >= num_rec) + mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1); + mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0); + mlxsw_reg_sfd_rec_type_set(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_UNICAST); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac); + mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0); + mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid); + mlxsw_reg_sfd_rec_action_set(payload, rec_index, action); + mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port); +} + +static inline void +mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index, + char *mac, u16 *p_vid, + u8 *p_local_port) +{ + mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); + *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index); + *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index); +} + +/* SFN - Switch FDB Notification Register + * ------------------------------------------- + * The switch provides notifications on newly learned FDB entries and + * aged out entries. The notifications can be polled by software. + */ +#define MLXSW_REG_SFN_ID 0x200B +#define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */ +#define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */ +#define MLXSW_REG_SFN_REC_MAX_COUNT 64 +#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \ + MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT) + +static const struct mlxsw_reg_info mlxsw_reg_sfn = { + .id = MLXSW_REG_SFN_ID, + .len = MLXSW_REG_SFN_LEN, +}; + +/* reg_sfn_swid + * Switch partition ID. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8); + +/* reg_sfn_num_rec + * Request: Number of learned notifications and aged-out notification + * records requested. + * Response: Number of notification records returned (must be smaller + * than or equal to the value requested) + * Ranges 0..64 + * Access: OP + */ +MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8); + +static inline void mlxsw_reg_sfn_pack(char *payload) +{ + MLXSW_REG_ZERO(sfn, payload); + mlxsw_reg_sfn_swid_set(payload, 0); + mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT); +} + +/* reg_sfn_rec_swid + * Switch partition ID. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8, + MLXSW_REG_SFN_REC_LEN, 0x00, false); + +enum mlxsw_reg_sfn_rec_type { + /* MAC addresses learned on a regular port. */ + MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5, + /* Aged-out MAC address on a regular port */ + MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7, +}; + +/* reg_sfn_rec_type + * Notification record type. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4, + MLXSW_REG_SFN_REC_LEN, 0x00, false); + +/* reg_sfn_rec_mac + * MAC address. + * Access: RO + */ +MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6, + MLXSW_REG_SFN_REC_LEN, 0x02); + +/* reg_sfd_mac_sub_port + * VEPA channel on the local port. + * 0 if multichannel VEPA is not enabled. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8, + MLXSW_REG_SFN_REC_LEN, 0x08, false); + +/* reg_sfd_mac_fid + * Filtering identifier. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16, + MLXSW_REG_SFN_REC_LEN, 0x08, false); + +/* reg_sfd_mac_system_port + * Unique port identifier for the final destination of the packet. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16, + MLXSW_REG_SFN_REC_LEN, 0x0C, false); + +static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index, + char *mac, u16 *p_vid, + u8 *p_local_port) +{ + mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac); + *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index); + *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index); +} + /* SPMS - Switch Port MSTP/RSTP State Register * ------------------------------------------- * Configures the spanning tree state of a physical port. */ -#define MLXSW_REG_SPMS_ID 0x200d +#define MLXSW_REG_SPMS_ID 0x200D #define MLXSW_REG_SPMS_LEN 0x404 static const struct mlxsw_reg_info mlxsw_reg_spms = { @@ -243,20 +540,166 @@ enum mlxsw_reg_spms_state { */ MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2); -static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid, - enum mlxsw_reg_spms_state state) +static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port) { MLXSW_REG_ZERO(spms, payload); mlxsw_reg_spms_local_port_set(payload, local_port); +} + +static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid, + enum mlxsw_reg_spms_state state) +{ mlxsw_reg_spms_state_set(payload, vid, state); } +/* SPVID - Switch Port VID + * ----------------------- + * The switch port VID configures the default VID for a port. + */ +#define MLXSW_REG_SPVID_ID 0x200E +#define MLXSW_REG_SPVID_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_spvid = { + .id = MLXSW_REG_SPVID_ID, + .len = MLXSW_REG_SPVID_LEN, +}; + +/* reg_spvid_local_port + * Local port number. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8); + +/* reg_spvid_sub_port + * Virtual port within the physical port. + * Should be set to 0 when virtual ports are not enabled on the port. + * Access: Index + */ +MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8); + +/* reg_spvid_pvid + * Port default VID + * Access: RW + */ +MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12); + +static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid) +{ + MLXSW_REG_ZERO(spvid, payload); + mlxsw_reg_spvid_local_port_set(payload, local_port); + mlxsw_reg_spvid_pvid_set(payload, pvid); +} + +/* SPVM - Switch Port VLAN Membership + * ---------------------------------- + * The Switch Port VLAN Membership register configures the VLAN membership + * of a port in a VLAN denoted by VID. VLAN membership is managed per + * virtual port. The register can be used to add and remove VID(s) from a port. + */ +#define MLXSW_REG_SPVM_ID 0x200F +#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */ +#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */ +#define MLXSW_REG_SPVM_REC_MAX_COUNT 256 +#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \ + MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT) + +static const struct mlxsw_reg_info mlxsw_reg_spvm = { + .id = MLXSW_REG_SPVM_ID, + .len = MLXSW_REG_SPVM_LEN, +}; + +/* reg_spvm_pt + * Priority tagged. If this bit is set, packets forwarded to the port with + * untagged VLAN membership (u bit is set) will be tagged with priority tag + * (VID=0) + * Access: RW + */ +MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1); + +/* reg_spvm_pte + * Priority Tagged Update Enable. On Write operations, if this bit is cleared, + * the pt bit will NOT be updated. To update the pt bit, pte must be set. + * Access: WO + */ +MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1); + +/* reg_spvm_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8); + +/* reg_spvm_sub_port + * Virtual port within the physical port. + * Should be set to 0 when virtual ports are not enabled on the port. + * Access: Index + */ +MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8); + +/* reg_spvm_num_rec + * Number of records to update. Each record contains: i, e, u, vid. + * Access: OP + */ +MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8); + +/* reg_spvm_rec_i + * Ingress membership in VLAN ID. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, spvm, rec_i, + MLXSW_REG_SPVM_BASE_LEN, 14, 1, + MLXSW_REG_SPVM_REC_LEN, 0, false); + +/* reg_spvm_rec_e + * Egress membership in VLAN ID. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, spvm, rec_e, + MLXSW_REG_SPVM_BASE_LEN, 13, 1, + MLXSW_REG_SPVM_REC_LEN, 0, false); + +/* reg_spvm_rec_u + * Untagged - port is an untagged member - egress transmission uses untagged + * frames on VID<n> + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, spvm, rec_u, + MLXSW_REG_SPVM_BASE_LEN, 12, 1, + MLXSW_REG_SPVM_REC_LEN, 0, false); + +/* reg_spvm_rec_vid + * Egress membership in VLAN ID. 
+ * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid, + MLXSW_REG_SPVM_BASE_LEN, 0, 12, + MLXSW_REG_SPVM_REC_LEN, 0, false); + +static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port, + u16 vid_begin, u16 vid_end, + bool is_member, bool untagged) +{ + int size = vid_end - vid_begin + 1; + int i; + + MLXSW_REG_ZERO(spvm, payload); + mlxsw_reg_spvm_local_port_set(payload, local_port); + mlxsw_reg_spvm_num_rec_set(payload, size); + + for (i = 0; i < size; i++) { + mlxsw_reg_spvm_rec_i_set(payload, i, is_member); + mlxsw_reg_spvm_rec_e_set(payload, i, is_member); + mlxsw_reg_spvm_rec_u_set(payload, i, untagged); + mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i); + } +} + /* SFGC - Switch Flooding Group Configuration * ------------------------------------------ * The following register controls the association of flooding tables and MIDs * to packet types used for flooding. */ -#define MLXSW_REG_SFGC_ID 0x2011 +#define MLXSW_REG_SFGC_ID 0x2011 #define MLXSW_REG_SFGC_LEN 0x10 static const struct mlxsw_reg_info mlxsw_reg_sfgc = { @@ -265,13 +708,15 @@ static const struct mlxsw_reg_info mlxsw_reg_sfgc = { }; enum mlxsw_reg_sfgc_type { - MLXSW_REG_SFGC_TYPE_BROADCAST = 0, - MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1, - MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2, - MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3, - MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5, - MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6, - MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7, + MLXSW_REG_SFGC_TYPE_BROADCAST, + MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6, + MLXSW_REG_SFGC_TYPE_RESERVED, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP, + MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL, + MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST, + MLXSW_REG_SFGC_TYPE_MAX, }; /* reg_sfgc_type @@ -408,7 +853,7 @@ static inline void mlxsw_reg_sftr_pack(char *payload, unsigned int flood_table, unsigned int index, enum mlxsw_flood_table_type table_type, - unsigned int range) + unsigned int range, u8 port, bool set) { MLXSW_REG_ZERO(sftr, payload); mlxsw_reg_sftr_swid_set(payload, 0); @@ -416,8 +861,8 @@ static inline void mlxsw_reg_sftr_pack(char *payload, mlxsw_reg_sftr_index_set(payload, index); mlxsw_reg_sftr_table_type_set(payload, table_type); mlxsw_reg_sftr_range_set(payload, range); - mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1); - mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1); + mlxsw_reg_sftr_port_set(payload, port, set); + mlxsw_reg_sftr_port_mask_set(payload, port, 1); } /* SPMLR - Switch Port MAC Learning Register @@ -473,6 +918,285 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port, mlxsw_reg_spmlr_learn_mode_set(payload, mode); } +/* SVFA - Switch VID to FID Allocation Register + * -------------------------------------------- + * Controls the VID to FID mapping and {Port, VID} to FID mapping for + * virtualized ports. + */ +#define MLXSW_REG_SVFA_ID 0x201C +#define MLXSW_REG_SVFA_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_svfa = { + .id = MLXSW_REG_SVFA_ID, + .len = MLXSW_REG_SVFA_LEN, +}; + +/* reg_svfa_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8); + +/* reg_svfa_local_port + * Local port number. + * Access: Index + * + * Note: Reserved for 802.1Q FIDs. 
+ */ +MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8); + +enum mlxsw_reg_svfa_mt { + MLXSW_REG_SVFA_MT_VID_TO_FID, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, +}; + +/* reg_svfa_mapping_table + * Mapping table: + * 0 - VID to FID + * 1 - {Port, VID} to FID + * Access: Index + * + * Note: Reserved for SwitchX-2. + */ +MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3); + +/* reg_svfa_v + * Valid. + * Valid if set. + * Access: RW + * + * Note: Reserved for SwitchX-2. + */ +MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1); + +/* reg_svfa_fid + * Filtering ID. + * Access: RW + */ +MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16); + +/* reg_svfa_vid + * VLAN ID. + * Access: Index + */ +MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12); + +/* reg_svfa_counter_set_type + * Counter set type for flow counters. + * Access: RW + * + * Note: Reserved for SwitchX-2. + */ +MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8); + +/* reg_svfa_counter_index + * Counter index for flow counters. + * Access: RW + * + * Note: Reserved for SwitchX-2. + */ +MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24); + +static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port, + enum mlxsw_reg_svfa_mt mt, bool valid, + u16 fid, u16 vid) +{ + MLXSW_REG_ZERO(svfa, payload); + local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port; + mlxsw_reg_svfa_swid_set(payload, 0); + mlxsw_reg_svfa_local_port_set(payload, local_port); + mlxsw_reg_svfa_mapping_table_set(payload, mt); + mlxsw_reg_svfa_v_set(payload, valid); + mlxsw_reg_svfa_fid_set(payload, fid); + mlxsw_reg_svfa_vid_set(payload, vid); +} + +/* SVPE - Switch Virtual-Port Enabling Register + * -------------------------------------------- + * Enables port virtualization. + */ +#define MLXSW_REG_SVPE_ID 0x201E +#define MLXSW_REG_SVPE_LEN 0x4 + +static const struct mlxsw_reg_info mlxsw_reg_svpe = { + .id = MLXSW_REG_SVPE_ID, + .len = MLXSW_REG_SVPE_LEN, +}; + +/* reg_svpe_local_port + * Local port number + * Access: Index + * + * Note: CPU port is not supported (uses VLAN mode only). + */ +MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8); + +/* reg_svpe_vp_en + * Virtual port enable. + * 0 - Disable, VLAN mode (VID to FID). + * 1 - Enable, Virtual port mode ({Port, VID} to FID). + * Access: RW + */ +MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1); + +static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port, + bool enable) +{ + MLXSW_REG_ZERO(svpe, payload); + mlxsw_reg_svpe_local_port_set(payload, local_port); + mlxsw_reg_svpe_vp_en_set(payload, enable); +} + +/* SFMR - Switch FID Management Register + * ------------------------------------- + * Creates and configures FIDs. + */ +#define MLXSW_REG_SFMR_ID 0x201F +#define MLXSW_REG_SFMR_LEN 0x18 + +static const struct mlxsw_reg_info mlxsw_reg_sfmr = { + .id = MLXSW_REG_SFMR_ID, + .len = MLXSW_REG_SFMR_LEN, +}; + +enum mlxsw_reg_sfmr_op { + MLXSW_REG_SFMR_OP_CREATE_FID, + MLXSW_REG_SFMR_OP_DESTROY_FID, +}; + +/* reg_sfmr_op + * Operation. + * 0 - Create or edit FID. + * 1 - Destroy FID. + * Access: WO + */ +MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4); + +/* reg_sfmr_fid + * Filtering ID. + * Access: Index + */ +MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16); + +/* reg_sfmr_fid_offset + * FID offset. + * Used to point into the flooding table selected by SFGC register if + * the table is of type FID-Offset. Otherwise, this field is reserved. + * Access: RW + */ +MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16); + +/* reg_sfmr_vtfp + * Valid Tunnel Flood Pointer. + * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL. 
+ * Access: RW + * + * Note: Reserved for 802.1Q FIDs. + */ +MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1); + +/* reg_sfmr_nve_tunnel_flood_ptr + * Underlay Flooding and BC Pointer. + * Used as a pointer to the first entry of the group based link lists of + * flooding or BC entries (for NVE tunnels). + * Access: RW + */ +MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24); + +/* reg_sfmr_vv + * VNI Valid. + * If not set, then vni is reserved. + * Access: RW + * + * Note: Reserved for 802.1Q FIDs. + */ +MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1); + +/* reg_sfmr_vni + * Virtual Network Identifier. + * Access: RW + * + * Note: A given VNI can only be assigned to one FID. + */ +MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24); + +static inline void mlxsw_reg_sfmr_pack(char *payload, + enum mlxsw_reg_sfmr_op op, u16 fid, + u16 fid_offset) +{ + MLXSW_REG_ZERO(sfmr, payload); + mlxsw_reg_sfmr_op_set(payload, op); + mlxsw_reg_sfmr_fid_set(payload, fid); + mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset); + mlxsw_reg_sfmr_vtfp_set(payload, false); + mlxsw_reg_sfmr_vv_set(payload, false); +} + +/* SPVMLR - Switch Port VLAN MAC Learning Register + * ----------------------------------------------- + * Controls the switch MAC learning policy per {Port, VID}. + */ +#define MLXSW_REG_SPVMLR_ID 0x2020 +#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */ +#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */ +#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256 +#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \ + MLXSW_REG_SPVMLR_REC_LEN * \ + MLXSW_REG_SPVMLR_REC_MAX_COUNT) + +static const struct mlxsw_reg_info mlxsw_reg_spvmlr = { + .id = MLXSW_REG_SPVMLR_ID, + .len = MLXSW_REG_SPVMLR_LEN, +}; + +/* reg_spvmlr_local_port + * Local ingress port. + * Access: Index + * + * Note: CPU port is not supported. + */ +MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8); + +/* reg_spvmlr_num_rec + * Number of records to update. + * Access: OP + */ +MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8); + +/* reg_spvmlr_rec_learn_enable + * 0 - Disable learning for {Port, VID}. + * 1 - Enable learning for {Port, VID}. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN, + 31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false); + +/* reg_spvmlr_rec_vid + * VLAN ID to be added/removed from port or for querying. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12, + MLXSW_REG_SPVMLR_REC_LEN, 0x00, false); + +static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, + u16 vid_begin, u16 vid_end, + bool learn_enable) +{ + int num_rec = vid_end - vid_begin + 1; + int i; + + WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT); + + MLXSW_REG_ZERO(spvmlr, payload); + mlxsw_reg_spvmlr_local_port_set(payload, local_port); + mlxsw_reg_spvmlr_num_rec_set(payload, num_rec); + + for (i = 0; i < num_rec; i++) { + mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable); + mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i); + } +} + /* PMLP - Ports Module to Local Port Register * ------------------------------------------ * Configures the assignment of modules to local ports. 
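mlxsw_reg_spvm_pack() and mlxsw_reg_spvmlr_pack() above both expand a [vid_begin, vid_end] range into consecutive records appended after the register's 4-byte base header, one 4-byte record per VID. The standalone sketch below (illustration only; it reuses the SPVM base/record lengths defined above and prints instead of writing a real mailbox) shows where record i lands in the payload and which VID it carries.

#include <stdio.h>

#define SPVM_BASE_LEN 0x04      /* MLXSW_REG_SPVM_BASE_LEN */
#define SPVM_REC_LEN  0x04      /* MLXSW_REG_SPVM_REC_LEN */

static void show_record_layout(unsigned int vid_begin, unsigned int vid_end)
{
        unsigned int num_rec = vid_end - vid_begin + 1;
        unsigned int i;

        for (i = 0; i < num_rec; i++) {
                unsigned int offset = SPVM_BASE_LEN + i * SPVM_REC_LEN;

                printf("record %u: payload offset 0x%02x, vid %u\n",
                       i, offset, vid_begin + i);
        }
}

int main(void)
{
        show_record_layout(100, 103);   /* four VIDs -> records at 0x04, 0x08, 0x0c, 0x10 */
        return 0;
}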
@@ -1008,12 +1732,88 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port) mlxsw_reg_ppcnt_prio_tc_set(payload, 0); } +/* PBMC - Port Buffer Management Control Register + * ---------------------------------------------- + * The PBMC register configures and retrieves the port packet buffer + * allocation for different Prios, and the Pause threshold management. + */ +#define MLXSW_REG_PBMC_ID 0x500C +#define MLXSW_REG_PBMC_LEN 0x68 + +static const struct mlxsw_reg_info mlxsw_reg_pbmc = { + .id = MLXSW_REG_PBMC_ID, + .len = MLXSW_REG_PBMC_LEN, +}; + +/* reg_pbmc_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8); + +/* reg_pbmc_xoff_timer_value + * When device generates a pause frame, it uses this value as the pause + * timer (time for the peer port to pause in quota-512 bit time). + * Access: RW + */ +MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16); + +/* reg_pbmc_xoff_refresh + * The time before a new pause frame should be sent to refresh the pause RW + * state. Using the same units as xoff_timer_value above (in quota-512 bit + * time). + * Access: RW + */ +MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16); + +/* reg_pbmc_buf_lossy + * The field indicates if the buffer is lossy. + * 0 - Lossless + * 1 - Lossy + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false); + +/* reg_pbmc_buf_epsb + * Eligible for Port Shared buffer. + * If epsb is set, packets assigned to buffer are allowed to insert the port + * shared buffer. + * When buf_lossy is MLXSW_REG_PBMC_LOSSY_LOSSY this field is reserved. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false); + +/* reg_pbmc_buf_size + * The part of the packet buffer array is allocated for the specific buffer. + * Units are represented in cells. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false); + +static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port, + u16 xoff_timer_value, u16 xoff_refresh) +{ + MLXSW_REG_ZERO(pbmc, payload); + mlxsw_reg_pbmc_local_port_set(payload, local_port); + mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value); + mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh); +} + +static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload, + int buf_index, + u16 size) +{ + mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1); + mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0); + mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size); +} + /* PSPA - Port Switch Partition Allocation * --------------------------------------- * Controls the association of a port with a switch partition and enables * configuring ports as stacking ports. */ -#define MLXSW_REG_PSPA_ID 0x500d +#define MLXSW_REG_PSPA_ID 0x500D #define MLXSW_REG_PSPA_LEN 0x8 static const struct mlxsw_reg_info mlxsw_reg_pspa = { @@ -1074,8 +1874,11 @@ MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8); */ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4); -#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0 -#define MLXSW_REG_HTGT_TRAP_GROUP_RX 0x1 +enum mlxsw_reg_htgt_trap_group { + MLXSW_REG_HTGT_TRAP_GROUP_EMAD, + MLXSW_REG_HTGT_TRAP_GROUP_RX, + MLXSW_REG_HTGT_TRAP_GROUP_CTRL, +}; /* reg_htgt_trap_group * Trap group number. 
User defined number specifying which trap groups @@ -1142,6 +1945,7 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6); #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15 #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14 +#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13 /* reg_htgt_local_path_rdq * Receive descriptor queue (RDQ) to use for the trap group. @@ -1149,21 +1953,29 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6); */ MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6); -static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group) +static inline void mlxsw_reg_htgt_pack(char *payload, + enum mlxsw_reg_htgt_trap_group group) { u8 swid, rdq; MLXSW_REG_ZERO(htgt, payload); - if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) { + switch (group) { + case MLXSW_REG_HTGT_TRAP_GROUP_EMAD: swid = MLXSW_PORT_SWID_ALL_SWIDS; rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD; - } else { + break; + case MLXSW_REG_HTGT_TRAP_GROUP_RX: swid = 0; rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX; + break; + case MLXSW_REG_HTGT_TRAP_GROUP_CTRL: + swid = 0; + rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL; + break; } mlxsw_reg_htgt_swid_set(payload, swid); mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL); - mlxsw_reg_htgt_trap_group_set(payload, trap_group); + mlxsw_reg_htgt_trap_group_set(payload, group); mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE); mlxsw_reg_htgt_pid_set(payload, 0); mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU); @@ -1254,17 +2066,290 @@ enum { */ MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2); -static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, - u8 trap_group, u16 trap_id) +static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id) { + enum mlxsw_reg_htgt_trap_group trap_group; + MLXSW_REG_ZERO(hpkt, payload); mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED); mlxsw_reg_hpkt_action_set(payload, action); + switch (trap_id) { + case MLXSW_TRAP_ID_ETHEMAD: + case MLXSW_TRAP_ID_PUDE: + trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD; + break; + default: + trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX; + break; + } mlxsw_reg_hpkt_trap_group_set(payload, trap_group); mlxsw_reg_hpkt_trap_id_set(payload, trap_id); mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); } +/* SBPR - Shared Buffer Pools Register + * ----------------------------------- + * The SBPR configures and retrieves the shared buffer pools and configuration. + */ +#define MLXSW_REG_SBPR_ID 0xB001 +#define MLXSW_REG_SBPR_LEN 0x14 + +static const struct mlxsw_reg_info mlxsw_reg_sbpr = { + .id = MLXSW_REG_SBPR_ID, + .len = MLXSW_REG_SBPR_LEN, +}; + +enum mlxsw_reg_sbpr_dir { + MLXSW_REG_SBPR_DIR_INGRESS, + MLXSW_REG_SBPR_DIR_EGRESS, +}; + +/* reg_sbpr_dir + * Direction. + * Access: Index + */ +MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2); + +/* reg_sbpr_pool + * Pool index. + * Access: Index + */ +MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4); + +/* reg_sbpr_size + * Pool size in buffer cells. + * Access: RW + */ +MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24); + +enum mlxsw_reg_sbpr_mode { + MLXSW_REG_SBPR_MODE_STATIC, + MLXSW_REG_SBPR_MODE_DYNAMIC, +}; + +/* reg_sbpr_mode + * Pool quota calculation mode. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4); + +static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool, + enum mlxsw_reg_sbpr_dir dir, + enum mlxsw_reg_sbpr_mode mode, u32 size) +{ + MLXSW_REG_ZERO(sbpr, payload); + mlxsw_reg_sbpr_pool_set(payload, pool); + mlxsw_reg_sbpr_dir_set(payload, dir); + mlxsw_reg_sbpr_mode_set(payload, mode); + mlxsw_reg_sbpr_size_set(payload, size); +} + +/* SBCM - Shared Buffer Class Management Register + * ---------------------------------------------- + * The SBCM register configures and retrieves the shared buffer allocation + * and configuration according to Port-PG, including the binding to pool + * and definition of the associated quota. + */ +#define MLXSW_REG_SBCM_ID 0xB002 +#define MLXSW_REG_SBCM_LEN 0x28 + +static const struct mlxsw_reg_info mlxsw_reg_sbcm = { + .id = MLXSW_REG_SBCM_ID, + .len = MLXSW_REG_SBCM_LEN, +}; + +/* reg_sbcm_local_port + * Local port number. + * For Ingress: excludes CPU port and Router port + * For Egress: excludes IP Router + * Access: Index + */ +MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8); + +/* reg_sbcm_pg_buff + * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress) + * For PG buffer: range is 0..cap_max_pg_buffers - 1 + * For traffic class: range is 0..cap_max_tclass - 1 + * Note that when traffic class is in MC aware mode then the traffic + * classes which are MC aware cannot be configured. + * Access: Index + */ +MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6); + +enum mlxsw_reg_sbcm_dir { + MLXSW_REG_SBCM_DIR_INGRESS, + MLXSW_REG_SBCM_DIR_EGRESS, +}; + +/* reg_sbcm_dir + * Direction. + * Access: Index + */ +MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2); + +/* reg_sbcm_min_buff + * Minimum buffer size for the limiter, in cells. + * Access: RW + */ +MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24); + +/* reg_sbcm_max_buff + * When the pool associated to the port-pg/tclass is configured to + * static, Maximum buffer size for the limiter configured in cells. + * When the pool associated to the port-pg/tclass is configured to + * dynamic, the max_buff holds the "alpha" parameter, supporting + * the following values: + * 0: 0 + * i: (1/128)*2^(i-1), for i=1..14 + * 0xFF: Infinity + * Access: RW + */ +MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24); + +/* reg_sbcm_pool + * Association of the port-priority to a pool. + * Access: RW + */ +MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4); + +static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, + enum mlxsw_reg_sbcm_dir dir, + u32 min_buff, u32 max_buff, u8 pool) +{ + MLXSW_REG_ZERO(sbcm, payload); + mlxsw_reg_sbcm_local_port_set(payload, local_port); + mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff); + mlxsw_reg_sbcm_dir_set(payload, dir); + mlxsw_reg_sbcm_min_buff_set(payload, min_buff); + mlxsw_reg_sbcm_max_buff_set(payload, max_buff); + mlxsw_reg_sbcm_pool_set(payload, pool); +} + +/* SBPM - Shared Buffer Class Management Register + * ---------------------------------------------- + * The SBPM register configures and retrieves the shared buffer allocation + * and configuration according to Port-Pool, including the definition + * of the associated quota. + */ +#define MLXSW_REG_SBPM_ID 0xB003 +#define MLXSW_REG_SBPM_LEN 0x28 + +static const struct mlxsw_reg_info mlxsw_reg_sbpm = { + .id = MLXSW_REG_SBPM_ID, + .len = MLXSW_REG_SBPM_LEN, +}; + +/* reg_sbpm_local_port + * Local port number. 
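SBPR carves out a shared pool and SBCM then binds a port's PG (ingress) or traffic class (egress) to it with min/max quotas. A hedged sketch of an ingress configuration; the pool size, the min_buff of 0 and the dynamic "alpha" index of 8 (which per the max_buff table above corresponds to a factor of 1) are illustrative values only, and the mlxsw_core pointer is assumed:

	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	int err;

	/* Pool 0, ingress, dynamic quota calculation, size in cells. */
	mlxsw_reg_sbpr_pack(sbpr_pl, 0, MLXSW_REG_SBPR_DIR_INGRESS,
			    MLXSW_REG_SBPR_MODE_DYNAMIC, 0x4000);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	/* Bind ingress PG 0 of the port to pool 0: no guaranteed cells,
	 * max_buff = 8 is the dynamic alpha index ((1/128) * 2^7 = 1).
	 */
	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, 0, MLXSW_REG_SBCM_DIR_INGRESS,
			    0, 8, 0);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sbcm), sbcm_pl);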
+ * For Ingress: excludes CPU port and Router port + * For Egress: excludes IP Router + * Access: Index + */ +MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8); + +/* reg_sbpm_pool + * The pool associated to quota counting on the local_port. + * Access: Index + */ +MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4); + +enum mlxsw_reg_sbpm_dir { + MLXSW_REG_SBPM_DIR_INGRESS, + MLXSW_REG_SBPM_DIR_EGRESS, +}; + +/* reg_sbpm_dir + * Direction. + * Access: Index + */ +MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2); + +/* reg_sbpm_min_buff + * Minimum buffer size for the limiter, in cells. + * Access: RW + */ +MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24); + +/* reg_sbpm_max_buff + * When the pool associated to the port-pg/tclass is configured to + * static, Maximum buffer size for the limiter configured in cells. + * When the pool associated to the port-pg/tclass is configured to + * dynamic, the max_buff holds the "alpha" parameter, supporting + * the following values: + * 0: 0 + * i: (1/128)*2^(i-1), for i=1..14 + * 0xFF: Infinity + * Access: RW + */ +MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24); + +static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool, + enum mlxsw_reg_sbpm_dir dir, + u32 min_buff, u32 max_buff) +{ + MLXSW_REG_ZERO(sbpm, payload); + mlxsw_reg_sbpm_local_port_set(payload, local_port); + mlxsw_reg_sbpm_pool_set(payload, pool); + mlxsw_reg_sbpm_dir_set(payload, dir); + mlxsw_reg_sbpm_min_buff_set(payload, min_buff); + mlxsw_reg_sbpm_max_buff_set(payload, max_buff); +} + +/* SBMM - Shared Buffer Multicast Management Register + * -------------------------------------------------- + * The SBMM register configures and retrieves the shared buffer allocation + * and configuration for MC packets according to Switch-Priority, including + * the binding to pool and definition of the associated quota. + */ +#define MLXSW_REG_SBMM_ID 0xB004 +#define MLXSW_REG_SBMM_LEN 0x28 + +static const struct mlxsw_reg_info mlxsw_reg_sbmm = { + .id = MLXSW_REG_SBMM_ID, + .len = MLXSW_REG_SBMM_LEN, +}; + +/* reg_sbmm_prio + * Switch Priority. + * Access: Index + */ +MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4); + +/* reg_sbmm_min_buff + * Minimum buffer size for the limiter, in cells. + * Access: RW + */ +MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24); + +/* reg_sbmm_max_buff + * When the pool associated to the port-pg/tclass is configured to + * static, Maximum buffer size for the limiter configured in cells. + * When the pool associated to the port-pg/tclass is configured to + * dynamic, the max_buff holds the "alpha" parameter, supporting + * the following values: + * 0: 0 + * i: (1/128)*2^(i-1), for i=1..14 + * 0xFF: Infinity + * Access: RW + */ +MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24); + +/* reg_sbmm_pool + * Association of the port-priority to a pool. 
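SBPM layers a per-{port, pool} quota on top of the per-PG SBCM limits, with the same min/max semantics. A short sketch, assuming the pool was created in static mode so max_buff is a plain cell count; the 0x100 value and the mlxsw_core pointer are illustrative assumptions:

	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	int err;

	/* Cap the port's total ingress usage of pool 0 at 0x100 cells. */
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, 0, MLXSW_REG_SBPM_DIR_INGRESS,
			    0, 0x100);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sbpm), sbpm_pl);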
+ * Access: RW + */ +MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4); + +static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff, + u32 max_buff, u8 pool) +{ + MLXSW_REG_ZERO(sbmm, payload); + mlxsw_reg_sbmm_prio_set(payload, prio); + mlxsw_reg_sbmm_min_buff_set(payload, min_buff); + mlxsw_reg_sbmm_max_buff_set(payload, max_buff); + mlxsw_reg_sbmm_pool_set(payload, pool); +} + static inline const char *mlxsw_reg_id_str(u16 reg_id) { switch (reg_id) { @@ -1272,18 +2357,34 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SGCR"; case MLXSW_REG_SPAD_ID: return "SPAD"; - case MLXSW_REG_SMID_ID: - return "SMID"; case MLXSW_REG_SSPR_ID: return "SSPR"; + case MLXSW_REG_SFDAT_ID: + return "SFDAT"; + case MLXSW_REG_SFD_ID: + return "SFD"; + case MLXSW_REG_SFN_ID: + return "SFN"; case MLXSW_REG_SPMS_ID: return "SPMS"; + case MLXSW_REG_SPVID_ID: + return "SPVID"; + case MLXSW_REG_SPVM_ID: + return "SPVM"; case MLXSW_REG_SFGC_ID: return "SFGC"; case MLXSW_REG_SFTR_ID: return "SFTR"; case MLXSW_REG_SPMLR_ID: return "SPMLR"; + case MLXSW_REG_SVFA_ID: + return "SVFA"; + case MLXSW_REG_SVPE_ID: + return "SVPE"; + case MLXSW_REG_SFMR_ID: + return "SFMR"; + case MLXSW_REG_SPVMLR_ID: + return "SPVMLR"; case MLXSW_REG_PMLP_ID: return "PMLP"; case MLXSW_REG_PMTU_ID: @@ -1296,12 +2397,22 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "PAOS"; case MLXSW_REG_PPCNT_ID: return "PPCNT"; + case MLXSW_REG_PBMC_ID: + return "PBMC"; case MLXSW_REG_PSPA_ID: return "PSPA"; case MLXSW_REG_HTGT_ID: return "HTGT"; case MLXSW_REG_HPKT_ID: return "HPKT"; + case MLXSW_REG_SBPR_ID: + return "SBPR"; + case MLXSW_REG_SBCM_ID: + return "SBCM"; + case MLXSW_REG_SBPM_ID: + return "SBPM"; + case MLXSW_REG_SBMM_ID: + return "SBMM"; default: return "*UNKNOWN*"; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c new file mode 100644 index 000000000000..6e9906d8d149 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -0,0 +1,1948 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/skbuff.h> +#include <linux/if_vlan.h> +#include <linux/if_bridge.h> +#include <linux/workqueue.h> +#include <linux/jiffies.h> +#include <linux/bitops.h> +#include <net/switchdev.h> +#include <generated/utsrelease.h> + +#include "spectrum.h" +#include "core.h" +#include "reg.h" +#include "port.h" +#include "trap.h" +#include "txheader.h" + +static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; +static const char mlxsw_sp_driver_version[] = "1.0"; + +/* tx_hdr_version + * Tx header version. + * Must be set to 1. + */ +MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); + +/* tx_hdr_ctl + * Packet control type. + * 0 - Ethernet control (e.g. EMADs, LACP) + * 1 - Ethernet data + */ +MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); + +/* tx_hdr_proto + * Packet protocol type. Must be set to 1 (Ethernet). + */ +MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); + +/* tx_hdr_rx_is_router + * Packet is sent from the router. Valid for data packets only. + */ +MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); + +/* tx_hdr_fid_valid + * Indicates if the 'fid' field is valid and should be used for + * forwarding lookup. Valid for data packets only. + */ +MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); + +/* tx_hdr_swid + * Switch partition ID. Must be set to 0. + */ +MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); + +/* tx_hdr_control_tclass + * Indicates if the packet should use the control TClass and not one + * of the data TClasses. + */ +MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); + +/* tx_hdr_etclass + * Egress TClass to be used on the egress device on the egress port. + */ +MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); + +/* tx_hdr_port_mid + * Destination local port for unicast packets. + * Destination multicast ID for multicast packets. + * + * Control packets are directed to a specific egress port, while data + * packets are transmitted through the CPU port (0) into the switch partition, + * where forwarding rules are applied. + */ +MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); + +/* tx_hdr_fid + * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is + * set, otherwise calculated based on the packet's VID using VID to FID mapping. + * Valid for data packets only. 
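Reading each MLXSW_ITEM32(tx, hdr, ...) triple above as (byte offset, bit shift, width in bits), the control Tx header constructed below can be worked out by hand. A hedged example, assuming the ctl encoding for "Ethernet control" is 0 and that the version and proto constants carry the mandated value of 1:

/* Control packet egressing local port 5:
 *   word at 0x00 = (1 << 28)	version 1
 *		  | (0 << 26)	ctl: Ethernet control
 *		  | (1 << 21)	proto: Ethernet
 *		  | (0 << 12)	swid 0
 *		  | (1 << 6)	control_tclass: use the control TClass
 *		  = 0x10200040
 *   word at 0x04 = 5 << 16 = 0x00050000	port_mid: egress local port
 * The remaining words stay zero except for the packet type field at
 * offset 0x0C, which marks the packet as control.
 */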
+ */ +MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); + +/* tx_hdr_type + * 0 - Data packets + * 6 - Control packets + */ +MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); + +static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); + + memset(txhdr, 0, MLXSW_TXHDR_LEN); + + mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); + mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); + mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); + mlxsw_tx_hdr_swid_set(txhdr, 0); + mlxsw_tx_hdr_control_tclass_set(txhdr, 1); + mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); + mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); +} + +static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) +{ + char spad_pl[MLXSW_REG_SPAD_LEN]; + int err; + + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); + if (err) + return err; + mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); + return 0; +} + +static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool is_up) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char paos_pl[MLXSW_REG_PAOS_LEN]; + + mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, + is_up ? MLXSW_PORT_ADMIN_STATUS_UP : + MLXSW_PORT_ADMIN_STATUS_DOWN); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); +} + +static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port, + bool *p_is_up) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char paos_pl[MLXSW_REG_PAOS_LEN]; + u8 oper_status; + int err; + + mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); + if (err) + return err; + oper_status = mlxsw_reg_paos_oper_status_get(paos_pl); + *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? 
true : false; + return 0; +} + +static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) +{ + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + int err; + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, + MLXSW_SP_VFID_BASE + vfid, 0); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); + + if (err) + return err; + + set_bit(vfid, mlxsw_sp->active_vfids); + return 0; +} + +static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) +{ + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + clear_bit(vfid, mlxsw_sp->active_vfids); + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, + MLXSW_SP_VFID_BASE + vfid, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, + unsigned char *addr) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char ppad_pl[MLXSW_REG_PPAD_LEN]; + + mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); + mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); +} + +static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + unsigned char *addr = mlxsw_sp_port->dev->dev_addr; + + ether_addr_copy(addr, mlxsw_sp->base_mac); + addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; + return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); +} + +static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid, enum mlxsw_reg_spms_state state) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *spms_pl; + int err; + + spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); + if (!spms_pl) + return -ENOMEM; + mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); + mlxsw_reg_spms_vid_pack(spms_pl, vid, state); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); + kfree(spms_pl); + return err; +} + +static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char pmtu_pl[MLXSW_REG_PMTU_LEN]; + int max_mtu; + int err; + + mtu += MLXSW_TXHDR_LEN + ETH_HLEN; + mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); + if (err) + return err; + max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); + + if (mtu > max_mtu) + return -EINVAL; + + mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); +} + +static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char pspa_pl[MLXSW_REG_PSPA_LEN]; + + mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); +} + +static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool enable) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char svpe_pl[MLXSW_REG_SVPE_LEN]; + + mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); +} + +int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid, + u16 vid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char svfa_pl[MLXSW_REG_SVFA_LEN]; + + mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid, + fid, vid); + return 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); +} + +static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid, bool learn_enable) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *spvmlr_pl; + int err; + + spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); + if (!spvmlr_pl) + return -ENOMEM; + mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, + learn_enable); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); + kfree(spvmlr_pl); + return err; +} + +static int +mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sspr_pl[MLXSW_REG_SSPR_LEN]; + + mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); +} + +static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port, + bool *p_usable) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + int err; + + mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + return err; + *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false; + return 0; +} + +static int mlxsw_sp_port_open(struct net_device *dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); + if (err) + return err; + netif_start_queue(dev); + return 0; +} + +static int mlxsw_sp_port_stop(struct net_device *dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + netif_stop_queue(dev); + return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); +} + +static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port_pcpu_stats *pcpu_stats; + const struct mlxsw_tx_info tx_info = { + .local_port = mlxsw_sp_port->local_port, + .is_emad = false, + }; + u64 len; + int err; + + if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info)) + return NETDEV_TX_BUSY; + + if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { + struct sk_buff *skb_orig = skb; + + skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); + if (!skb) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb_orig); + return NETDEV_TX_OK; + } + } + + if (eth_skb_pad(skb)) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + return NETDEV_TX_OK; + } + + mlxsw_sp_txhdr_construct(skb, &tx_info); + len = skb->len; + /* Due to a race we might fail here because of a full queue. In that + * unlikely case we simply drop the packet. 
+ */ + err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info); + + if (!err) { + pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->tx_packets++; + pcpu_stats->tx_bytes += len; + u64_stats_update_end(&pcpu_stats->syncp); + } else { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; +} + +static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct sockaddr *addr = p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); + if (err) + return err; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return 0; +} + +static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); + if (err) + return err; + dev->mtu = mtu; + return 0; +} + +static struct rtnl_link_stats64 * +mlxsw_sp_port_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp_port_pcpu_stats *p; + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + u32 tx_dropped = 0; + unsigned int start; + int i; + + for_each_possible_cpu(i) { + p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + rx_packets = p->rx_packets; + rx_bytes = p->rx_bytes; + tx_packets = p->tx_packets; + tx_bytes = p->tx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + /* tx_dropped is u32, updated without syncp protection. 
*/ + tx_dropped += p->tx_dropped; + } + stats->tx_dropped = tx_dropped; + return stats; +} + +int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, + u16 vid_end, bool is_member, bool untagged) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *spvm_pl; + int err; + + spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); + if (!spvm_pl) + return -ENOMEM; + + mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, + vid_end, is_member, untagged); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); + kfree(spvm_pl); + return err; +} + +static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) +{ + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + u16 vid, last_visited_vid; + int err; + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid, + vid); + if (err) { + last_visited_vid = vid; + goto err_port_vid_to_fid_set; + } + } + + err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); + if (err) { + last_visited_vid = VLAN_N_VID; + goto err_port_vid_to_fid_set; + } + + return 0; + +err_port_vid_to_fid_set: + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid, + vid); + return err; +} + +static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) +{ + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + u16 vid; + int err; + + err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); + if (err) + return err; + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, + vid, vid); + if (err) + return err; + } + + return 0; +} + +int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, + u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *sftr_pl; + int err; + + /* VLAN 0 is added to HW filter when device goes up, but it is + * reserved in our case, so simply return. + */ + if (!vid) + return 0; + + if (test_bit(vid, mlxsw_sp_port->active_vfids)) { + netdev_warn(dev, "VID=%d already configured\n", vid); + return 0; + } + + if (!test_bit(vid, mlxsw_sp->active_vfids)) { + err = mlxsw_sp_vfid_create(mlxsw_sp, vid); + if (err) { + netdev_err(dev, "Failed to create vFID=%d\n", + MLXSW_SP_VFID_BASE + vid); + return err; + } + + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); + if (!sftr_pl) { + err = -ENOMEM; + goto err_flood_table_alloc; + } + mlxsw_reg_sftr_pack(sftr_pl, 0, vid, + MLXSW_REG_SFGC_TABLE_TYPE_FID, 0, + MLXSW_PORT_CPU_PORT, true); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + kfree(sftr_pl); + if (err) { + netdev_err(dev, "Failed to configure flood table\n"); + goto err_flood_table_config; + } + } + + /* In case we fail in the following steps, we intentionally do not + * destroy the associated vFID. + */ + + /* When adding the first VLAN interface on a bridged port we need to + * transition all the active 802.1Q bridge VLANs to use explicit + * {Port, VID} to FID mappings and set the port's mode to Virtual mode. 
+ */ + if (!mlxsw_sp_port->nr_vfids) { + err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); + if (err) { + netdev_err(dev, "Failed to set to Virtual mode\n"); + return err; + } + } + + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + true, MLXSW_SP_VFID_BASE + vid, vid); + if (err) { + netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", + vid, MLXSW_SP_VFID_BASE + vid); + goto err_port_vid_to_fid_set; + } + + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); + if (err) { + netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); + goto err_port_vid_learning_set; + } + + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false); + if (err) { + netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", + vid); + goto err_port_add_vid; + } + + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, + MLXSW_REG_SPMS_STATE_FORWARDING); + if (err) { + netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); + goto err_port_stp_state_set; + } + + mlxsw_sp_port->nr_vfids++; + set_bit(vid, mlxsw_sp_port->active_vfids); + + return 0; + +err_flood_table_config: +err_flood_table_alloc: + mlxsw_sp_vfid_destroy(mlxsw_sp, vid); + return err; + +err_port_stp_state_set: + mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); +err_port_add_vid: + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); +err_port_vid_learning_set: + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, + MLXSW_SP_VFID_BASE + vid, vid); +err_port_vid_to_fid_set: + mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); + return err; +} + +int mlxsw_sp_port_kill_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + /* VLAN 0 is removed from HW filter when device goes down, but + * it is reserved in our case, so simply return. + */ + if (!vid) + return 0; + + if (!test_bit(vid, mlxsw_sp_port->active_vfids)) { + netdev_warn(dev, "VID=%d does not exist\n", vid); + return 0; + } + + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, + MLXSW_REG_SPMS_STATE_DISCARDING); + if (err) { + netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); + return err; + } + + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); + if (err) { + netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", + vid); + return err; + } + + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); + if (err) { + netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); + return err; + } + + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + false, MLXSW_SP_VFID_BASE + vid, + vid); + if (err) { + netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", + vid, MLXSW_SP_VFID_BASE + vid); + return err; + } + + /* When removing the last VLAN interface on a bridged port we need to + * transition all active 802.1Q bridge VLANs to use VID to FID + * mappings and set port's mode to VLAN mode. 
+ */ + if (mlxsw_sp_port->nr_vfids == 1) { + err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); + if (err) { + netdev_err(dev, "Failed to set to VLAN mode\n"); + return err; + } + } + + mlxsw_sp_port->nr_vfids--; + clear_bit(vid, mlxsw_sp_port->active_vfids); + + return 0; +} + +static const struct net_device_ops mlxsw_sp_port_netdev_ops = { + .ndo_open = mlxsw_sp_port_open, + .ndo_stop = mlxsw_sp_port_stop, + .ndo_start_xmit = mlxsw_sp_port_xmit, + .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, + .ndo_change_mtu = mlxsw_sp_port_change_mtu, + .ndo_get_stats64 = mlxsw_sp_port_get_stats64, + .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, + .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, + .ndo_fdb_add = switchdev_port_fdb_add, + .ndo_fdb_del = switchdev_port_fdb_del, + .ndo_fdb_dump = switchdev_port_fdb_dump, + .ndo_bridge_setlink = switchdev_port_bridge_setlink, + .ndo_bridge_getlink = switchdev_port_bridge_getlink, + .ndo_bridge_dellink = switchdev_port_bridge_dellink, +}; + +static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, mlxsw_sp_driver_version, + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + mlxsw_sp->bus_info->fw_rev.major, + mlxsw_sp->bus_info->fw_rev.minor, + mlxsw_sp->bus_info->fw_rev.subminor); + strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, + sizeof(drvinfo->bus_info)); +} + +struct mlxsw_sp_port_hw_stats { + char str[ETH_GSTRING_LEN]; + u64 (*getter)(char *payload); +}; + +static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { + { + .str = "a_frames_transmitted_ok", + .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, + }, + { + .str = "a_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, + }, + { + .str = "a_frame_check_sequence_errors", + .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, + }, + { + .str = "a_alignment_errors", + .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, + }, + { + .str = "a_octets_transmitted_ok", + .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, + }, + { + .str = "a_octets_received_ok", + .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, + }, + { + .str = "a_multicast_frames_xmitted_ok", + .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, + }, + { + .str = "a_broadcast_frames_xmitted_ok", + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, + }, + { + .str = "a_multicast_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, + }, + { + .str = "a_broadcast_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, + }, + { + .str = "a_in_range_length_errors", + .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, + }, + { + .str = "a_out_of_range_length_field", + .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, + }, + { + .str = "a_frame_too_long_errors", + .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, + }, + { + .str = "a_symbol_error_during_carrier", + .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, + }, + { + .str = "a_mac_control_frames_transmitted", + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, + }, + { + .str = "a_mac_control_frames_received", + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, + }, + 
{ + .str = "a_unsupported_opcodes_received", + .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, + }, + { + .str = "a_pause_mac_ctrl_frames_received", + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, + }, + { + .str = "a_pause_mac_ctrl_frames_xmitted", + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, + }, +}; + +#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) + +static void mlxsw_sp_port_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void mlxsw_sp_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + int i; + int err; + + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); + for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) + data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; +} + +static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return MLXSW_SP_PORT_HW_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +struct mlxsw_sp_port_link_mode { + u32 mask; + u32 supported; + u32 advertised; + u32 speed; +}; + +static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, + .supported = SUPPORTED_100baseT_Full, + .advertised = ADVERTISED_100baseT_Full, + .speed = 100, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX, + .speed = 100, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .speed = 1000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, + .supported = SUPPORTED_10000baseT_Full, + .advertised = ADVERTISED_10000baseT_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, + .supported = SUPPORTED_10000baseKX4_Full, + .advertised = ADVERTISED_10000baseKX4_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, + .supported = SUPPORTED_20000baseKR2_Full, + .advertised = ADVERTISED_20000baseKR2_Full, + .speed = 20000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, + .supported = SUPPORTED_40000baseCR4_Full, + .advertised = ADVERTISED_40000baseCR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, + .supported = SUPPORTED_40000baseKR4_Full, + .advertised = ADVERTISED_40000baseKR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, + .supported = SUPPORTED_40000baseSR4_Full, + .advertised = ADVERTISED_40000baseSR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, + .supported = SUPPORTED_40000baseLR4_Full, + .advertised = ADVERTISED_40000baseLR4_Full, + .speed = 
40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, + .speed = 25000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 | + MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, + .speed = 50000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, + .supported = SUPPORTED_56000baseKR4_Full, + .advertised = ADVERTISED_56000baseKR4_Full, + .speed = 56000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, + .speed = 100000, + }, +}; + +#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) + +static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto) +{ + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_SGMII)) + return SUPPORTED_FIBRE; + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) + return SUPPORTED_Backplane; + return 0; +} + +static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto) +{ + u32 modes = 0; + int i; + + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) + modes |= mlxsw_sp_port_link_mode[i].supported; + } + return modes; +} + +static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto) +{ + u32 modes = 0; + int i; + + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) + modes |= mlxsw_sp_port_link_mode[i].advertised; + } + return modes; +} + +static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, + struct ethtool_cmd *cmd) +{ + u32 speed = SPEED_UNKNOWN; + u8 duplex = DUPLEX_UNKNOWN; + int i; + + if (!carrier_ok) + goto out; + + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { + speed = mlxsw_sp_port_link_mode[i].speed; + duplex = DUPLEX_FULL; + break; + } + } +out: + ethtool_cmd_speed_set(cmd, speed); + cmd->duplex = duplex; +} + +static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) +{ + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_SGMII)) + return PORT_FIBRE; + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) + return PORT_DA; + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) + return PORT_NONE; + + return PORT_OTHER; +} + +static int mlxsw_sp_port_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + u32 eth_proto_cap; + u32 eth_proto_admin; + u32 eth_proto_oper; + int err; + + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sp->core, 
MLXSW_REG(ptys), ptys_pl); + if (err) { + netdev_err(dev, "Failed to get proto"); + return err; + } + mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, + &eth_proto_admin, &eth_proto_oper); + + cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) | + mlxsw_sp_from_ptys_supported_link(eth_proto_cap) | + SUPPORTED_Pause | SUPPORTED_Asym_Pause; + cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin); + mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), + eth_proto_oper, cmd); + + eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; + cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper); + cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper); + + cmd->transceiver = XCVR_INTERNAL; + return 0; +} + +static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising) +{ + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { + if (advertising & mlxsw_sp_port_link_mode[i].advertised) + ptys_proto |= mlxsw_sp_port_link_mode[i].mask; + } + return ptys_proto; +} + +static u32 mlxsw_sp_to_ptys_speed(u32 speed) +{ + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { + if (speed == mlxsw_sp_port_link_mode[i].speed) + ptys_proto |= mlxsw_sp_port_link_mode[i].mask; + } + return ptys_proto; +} + +static int mlxsw_sp_port_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + u32 speed; + u32 eth_proto_new; + u32 eth_proto_cap; + u32 eth_proto_admin; + bool is_up; + int err; + + speed = ethtool_cmd_speed(cmd); + + eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ? + mlxsw_sp_to_ptys_advert_link(cmd->advertising) : + mlxsw_sp_to_ptys_speed(speed); + + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); + if (err) { + netdev_err(dev, "Failed to get proto"); + return err; + } + mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL); + + eth_proto_new = eth_proto_new & eth_proto_cap; + if (!eth_proto_new) { + netdev_err(dev, "Not supported proto admin requested"); + return -EINVAL; + } + if (eth_proto_new == eth_proto_admin) + return 0; + + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); + if (err) { + netdev_err(dev, "Failed to set proto admin"); + return err; + } + + err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up); + if (err) { + netdev_err(dev, "Failed to get oper status"); + return err; + } + if (!is_up) + return 0; + + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); + if (err) { + netdev_err(dev, "Failed to set admin status"); + return err; + } + + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); + if (err) { + netdev_err(dev, "Failed to set admin status"); + return err; + } + + return 0; +} + +static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { + .get_drvinfo = mlxsw_sp_port_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = mlxsw_sp_port_get_strings, + .get_ethtool_stats = mlxsw_sp_port_get_stats, + .get_sset_count = mlxsw_sp_port_get_sset_count, + .get_settings = mlxsw_sp_port_get_settings, + .set_settings = mlxsw_sp_port_set_settings, +}; + +static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + struct net_device *dev; + bool usable; + int err; + +
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); + if (!dev) + return -ENOMEM; + mlxsw_sp_port = netdev_priv(dev); + mlxsw_sp_port->dev = dev; + mlxsw_sp_port->mlxsw_sp = mlxsw_sp; + mlxsw_sp_port->local_port = local_port; + mlxsw_sp_port->learning = 1; + mlxsw_sp_port->learning_sync = 1; + mlxsw_sp_port->pvid = 1; + + mlxsw_sp_port->pcpu_stats = + netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); + if (!mlxsw_sp_port->pcpu_stats) { + err = -ENOMEM; + goto err_alloc_stats; + } + + dev->netdev_ops = &mlxsw_sp_port_netdev_ops; + dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; + + err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", + mlxsw_sp_port->local_port); + goto err_dev_addr_init; + } + + netif_carrier_off(dev); + + dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | + NETIF_F_HW_VLAN_CTAG_FILTER; + + /* Each packet needs to have a Tx header (metadata) on top all other + * headers. + */ + dev->hard_header_len += MLXSW_TXHDR_LEN; + + err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n", + mlxsw_sp_port->local_port); + goto err_port_module_check; + } + + if (!usable) { + dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n", + mlxsw_sp_port->local_port); + goto port_not_usable; + } + + err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", + mlxsw_sp_port->local_port); + goto err_port_system_port_mapping_set; + } + + err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", + mlxsw_sp_port->local_port); + goto err_port_swid_set; + } + + err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", + mlxsw_sp_port->local_port); + goto err_port_mtu_set; + } + + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); + if (err) + goto err_port_admin_status_set; + + err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", + mlxsw_sp_port->local_port); + goto err_port_buffers_init; + } + + mlxsw_sp_port_switchdev_init(mlxsw_sp_port); + err = register_netdev(dev); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", + mlxsw_sp_port->local_port); + goto err_register_netdev; + } + + err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); + if (err) + goto err_port_vlan_init; + + mlxsw_sp->ports[local_port] = mlxsw_sp_port; + return 0; + +err_port_vlan_init: + unregister_netdev(dev); +err_register_netdev: +err_port_buffers_init: +err_port_admin_status_set: +err_port_mtu_set: +err_port_swid_set: +err_port_system_port_mapping_set: +port_not_usable: +err_port_module_check: +err_dev_addr_init: + free_percpu(mlxsw_sp_port->pcpu_stats); +err_alloc_stats: + free_netdev(dev); + return err; +} + +static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp) +{ + u16 vfid; + + for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID) + mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); +} + +static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) +{ + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + + if (!mlxsw_sp_port) + return; + mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); + 
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ + mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); + free_percpu(mlxsw_sp_port->pcpu_stats); + free_netdev(mlxsw_sp_port->dev); +} + +static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + mlxsw_sp_port_remove(mlxsw_sp, i); + kfree(mlxsw_sp->ports); +} + +static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) +{ + size_t alloc_size; + int i; + int err; + + alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS; + mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); + if (!mlxsw_sp->ports) + return -ENOMEM; + + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { + err = mlxsw_sp_port_create(mlxsw_sp, i); + if (err) + goto err_port_create; + } + return 0; + +err_port_create: + for (i--; i >= 1; i--) + mlxsw_sp_port_remove(mlxsw_sp, i); + kfree(mlxsw_sp->ports); + return err; +} + +static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, + char *pude_pl, void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port; + enum mlxsw_reg_pude_oper_status status; + u8 local_port; + + local_port = mlxsw_reg_pude_local_port_get(pude_pl); + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) { + dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n", + local_port); + return; + } + + status = mlxsw_reg_pude_oper_status_get(pude_pl); + if (status == MLXSW_PORT_OPER_STATUS_UP) { + netdev_info(mlxsw_sp_port->dev, "link up\n"); + netif_carrier_on(mlxsw_sp_port->dev); + } else { + netdev_info(mlxsw_sp_port->dev, "link down\n"); + netif_carrier_off(mlxsw_sp_port->dev); + } +} + +static struct mlxsw_event_listener mlxsw_sp_pude_event = { + .func = mlxsw_sp_pude_event_func, + .trap_id = MLXSW_TRAP_ID_PUDE, +}; + +static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_event_trap_id trap_id) +{ + struct mlxsw_event_listener *el; + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int err; + + switch (trap_id) { + case MLXSW_TRAP_ID_PUDE: + el = &mlxsw_sp_pude_event; + break; + } + err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp); + if (err) + return err; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); + if (err) + goto err_event_trap_set; + + return 0; + +err_event_trap_set: + mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); + return err; +} + +static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_event_trap_id trap_id) +{ + struct mlxsw_event_listener *el; + + switch (trap_id) { + case MLXSW_TRAP_ID_PUDE: + el = &mlxsw_sp_pude_event; + break; + } + mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); +} + +static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + struct mlxsw_sp_port_pcpu_stats *pcpu_stats; + + if (unlikely(!mlxsw_sp_port)) { + dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", + local_port); + return; + } + + skb->dev = mlxsw_sp_port->dev; + + pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + + skb->protocol = eth_type_trans(skb, skb->dev); + 
netif_receive_skb(skb); +} + +static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_FDB_MC, + }, + /* Traps for specific L2 packet types, not trapped as FDB MC */ + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_STP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LACP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_EAPOL, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LLDP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MMRP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MVRP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_RPVST, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_DHCP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, + }, +}; + +static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int i; + int err; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { + err = mlxsw_core_rx_listener_register(mlxsw_sp->core, + &mlxsw_sp_rx_listener[i], + mlxsw_sp); + if (err) + goto err_rx_listener_register; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, + mlxsw_sp_rx_listener[i].trap_id); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); + if (err) + goto err_rx_trap_set; + } + return 0; + +err_rx_trap_set: + mlxsw_core_rx_listener_unregister(mlxsw_sp->core, + &mlxsw_sp_rx_listener[i], + mlxsw_sp); +err_rx_listener_register: + for (i--; i >= 0; i--) { + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + mlxsw_sp_rx_listener[i].trap_id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); + + mlxsw_core_rx_listener_unregister(mlxsw_sp->core, + &mlxsw_sp_rx_listener[i], + mlxsw_sp); + } + return err; +} + +static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) +{ + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + mlxsw_sp_rx_listener[i].trap_id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); + + 
mlxsw_core_rx_listener_unregister(mlxsw_sp->core, + &mlxsw_sp_rx_listener[i], + mlxsw_sp); + } +} + +static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, + enum mlxsw_reg_sfgc_type type, + enum mlxsw_reg_sfgc_bridge_type bridge_type) +{ + enum mlxsw_flood_table_type table_type; + enum mlxsw_sp_flood_table flood_table; + char sfgc_pl[MLXSW_REG_SFGC_LEN]; + + if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) { + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; + flood_table = 0; + } else { + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; + if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) + flood_table = MLXSW_SP_FLOOD_TABLE_UC; + else + flood_table = MLXSW_SP_FLOOD_TABLE_BM; + } + + mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, + flood_table); + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl); +} + +static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) +{ + int type, err; + + /* For non-offloaded netdevs, flood all traffic types to CPU + * port. + */ + for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { + if (type == MLXSW_REG_SFGC_TYPE_RESERVED) + continue; + + err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, + MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); + if (err) + return err; + } + + /* For bridged ports, use one flooding table for unknown unicast + * traffic and a second table for unregistered multicast and + * broadcast. + */ + for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { + if (type == MLXSW_REG_SFGC_TYPE_RESERVED) + continue; + + err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); + if (err) + return err; + } + + return 0; +} + +static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sp *mlxsw_sp = priv; + int err; + + mlxsw_sp->core = mlxsw_core; + mlxsw_sp->bus_info = mlxsw_bus_info; + + err = mlxsw_sp_base_mac_get(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); + return err; + } + + err = mlxsw_sp_ports_create(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); + goto err_ports_create; + } + + err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); + goto err_event_register; + } + + err = mlxsw_sp_traps_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n"); + goto err_rx_listener_register; + } + + err = mlxsw_sp_flood_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n"); + goto err_flood_init; + } + + err = mlxsw_sp_buffers_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); + goto err_buffers_init; + } + + err = mlxsw_sp_switchdev_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); + goto err_switchdev_init; + } + + return 0; + +err_switchdev_init: +err_buffers_init: +err_flood_init: + mlxsw_sp_traps_fini(mlxsw_sp); +err_rx_listener_register: + mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); +err_event_register: + mlxsw_sp_ports_remove(mlxsw_sp); +err_ports_create: + mlxsw_sp_vfids_fini(mlxsw_sp); + return err; +} + +static void mlxsw_sp_fini(void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_switchdev_fini(mlxsw_sp); + mlxsw_sp_traps_fini(mlxsw_sp); + mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); + mlxsw_sp_ports_remove(mlxsw_sp); + 
mlxsw_sp_vfids_fini(mlxsw_sp); +} + +static struct mlxsw_config_profile mlxsw_sp_config_profile = { + .used_max_vepa_channels = 1, + .max_vepa_channels = 0, + .used_max_lag = 1, + .max_lag = 64, + .used_max_port_per_lag = 1, + .max_port_per_lag = 16, + .used_max_mid = 1, + .max_mid = 7000, + .used_max_pgt = 1, + .max_pgt = 0, + .used_max_system_port = 1, + .max_system_port = 64, + .used_max_vlan_groups = 1, + .max_vlan_groups = 127, + .used_max_regions = 1, + .max_regions = 400, + .used_flood_tables = 1, + .used_flood_mode = 1, + .flood_mode = 3, + .max_fid_offset_flood_tables = 2, + .fid_offset_flood_table_size = VLAN_N_VID - 1, + .max_fid_flood_tables = 1, + .fid_flood_table_size = VLAN_N_VID, + .used_max_ib_mc = 1, + .max_ib_mc = 0, + .used_max_pkey = 1, + .max_pkey = 0, + .swid_config = { + { + .used_type = 1, + .type = MLXSW_PORT_SWID_TYPE_ETH, + } + }, +}; + +static struct mlxsw_driver mlxsw_sp_driver = { + .kind = MLXSW_DEVICE_KIND_SPECTRUM, + .owner = THIS_MODULE, + .priv_size = sizeof(struct mlxsw_sp), + .init = mlxsw_sp_init, + .fini = mlxsw_sp_fini, + .txhdr_construct = mlxsw_sp_txhdr_construct, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sp_config_profile, +}; + +static bool mlxsw_sp_port_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; +} + +static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + /* When port is not bridged untagged packets are tagged with + * PVID=VID=1, thereby creating an implicit VLAN interface in + * the device. Remove it and let bridge code take care of its + * own VLANs. + */ + err = mlxsw_sp_port_kill_vid(dev, 0, 1); + if (err) + netdev_err(dev, "Failed to remove VID 1\n"); + + return err; +} + +static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + /* Add implicit VLAN interface in the device, so that untagged + * packets will be classified to the default vFID. + */ + err = mlxsw_sp_port_add_vid(dev, 0, 1); + if (err) + netdev_err(dev, "Failed to add VID 1\n"); + + return err; +} + +static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) +{ + return !mlxsw_sp->master_bridge.dev || + mlxsw_sp->master_bridge.dev == br_dev; +} + +static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) +{ + mlxsw_sp->master_bridge.dev = br_dev; + mlxsw_sp->master_bridge.ref_count++; +} + +static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) +{ + if (--mlxsw_sp->master_bridge.ref_count == 0) + mlxsw_sp->master_bridge.dev = NULL; +} + +static int mlxsw_sp_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + struct mlxsw_sp_port *mlxsw_sp_port; + struct net_device *upper_dev; + struct mlxsw_sp *mlxsw_sp; + int err; + + if (!mlxsw_sp_port_dev_check(dev)) + return NOTIFY_DONE; + + mlxsw_sp_port = netdev_priv(dev); + mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + info = ptr; + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + /* HW limitation forbids to put ports to multiple bridges. 
*/ + if (info->master && info->linking && + netif_is_bridge_master(upper_dev) && + !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) + return NOTIFY_BAD; + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (info->master && + netif_is_bridge_master(upper_dev)) { + if (info->linking) { + err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); + if (err) + netdev_err(dev, "Failed to join bridge\n"); + mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); + mlxsw_sp_port->bridged = true; + } else { + err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port); + if (err) + netdev_err(dev, "Failed to leave bridge\n"); + mlxsw_sp_port->bridged = false; + mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); + } + } + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { + .notifier_call = mlxsw_sp_netdevice_event, +}; + +static int __init mlxsw_sp_module_init(void) +{ + int err; + + register_netdevice_notifier(&mlxsw_sp_netdevice_nb); + err = mlxsw_core_driver_register(&mlxsw_sp_driver); + if (err) + goto err_core_driver_register; + return 0; + +err_core_driver_register: + unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); + return err; +} + +static void __exit mlxsw_sp_module_exit(void) +{ + mlxsw_core_driver_unregister(&mlxsw_sp_driver); + unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); +} + +module_init(mlxsw_sp_module_init); +module_exit(mlxsw_sp_module_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox Spectrum driver"); +MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h new file mode 100644 index 000000000000..fc0074902ab5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -0,0 +1,121 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_H +#define _MLXSW_SPECTRUM_H + +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/bitops.h> +#include <linux/if_vlan.h> +#include <net/switchdev.h> + +#include "core.h" + +#define MLXSW_SP_VFID_BASE VLAN_N_VID + +struct mlxsw_sp_port; + +struct mlxsw_sp { + unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)]; + struct mlxsw_sp_port **ports; + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; + unsigned char base_mac[ETH_ALEN]; + struct { + struct delayed_work dw; +#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100 + unsigned int interval; /* ms */ + } fdb_notify; +#define MLXSW_SP_DEFAULT_AGEING_TIME 300 + u32 ageing_time; + struct { + struct net_device *dev; + unsigned int ref_count; + } master_bridge; +}; + +struct mlxsw_sp_port_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 tx_dropped; +}; + +struct mlxsw_sp_port { + struct net_device *dev; + struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; + struct mlxsw_sp *mlxsw_sp; + u8 local_port; + u8 stp_state; + u8 learning:1; + u8 learning_sync:1; + u16 pvid; + bool bridged; + /* 802.1Q bridge VLANs */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + /* VLAN interfaces */ + unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)]; + u16 nr_vfids; +}; + +enum mlxsw_sp_flood_table { + MLXSW_SP_FLOOD_TABLE_UC, + MLXSW_SP_FLOOD_TABLE_BM, +}; + +int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port); + +int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port); +void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port); +void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port); +int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid, + u16 vid); +int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, + u16 vid_end, bool is_member, bool untagged); +int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, + u16 vid); +int mlxsw_sp_port_kill_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c new file mode 100644 index 000000000000..d59195e3f7fb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -0,0 +1,422 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/types.h> + +#include "spectrum.h" +#include "core.h" +#include "port.h" +#include "reg.h" + +struct mlxsw_sp_pb { + u8 index; + u16 size; +}; + +#define MLXSW_SP_PB(_index, _size) \ + { \ + .index = _index, \ + .size = _size, \ + } + +static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = { + MLXSW_SP_PB(0, 208), + MLXSW_SP_PB(1, 208), + MLXSW_SP_PB(2, 208), + MLXSW_SP_PB(3, 208), + MLXSW_SP_PB(4, 208), + MLXSW_SP_PB(5, 208), + MLXSW_SP_PB(6, 208), + MLXSW_SP_PB(7, 208), + MLXSW_SP_PB(9, 208), +}; + +#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) + +static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + char pbmc_pl[MLXSW_REG_PBMC_LEN]; + int i; + + mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, + 0xffff, 0xffff / 2); + for (i = 0; i < MLXSW_SP_PBS_LEN; i++) { + const struct mlxsw_sp_pb *pb; + + pb = &mlxsw_sp_pbs[i]; + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size); + } + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, + MLXSW_REG(pbmc), pbmc_pl); +} + +#define MLXSW_SP_SB_BYTES_PER_CELL 96 + +struct mlxsw_sp_sb_pool { + u8 pool; + enum mlxsw_reg_sbpr_dir dir; + enum mlxsw_reg_sbpr_mode mode; + u32 size; +}; + +#define MLXSW_SP_SB_POOL_INGRESS_SIZE \ + ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \ + MLXSW_SP_SB_BYTES_PER_CELL) +#define MLXSW_SP_SB_POOL_EGRESS_SIZE \ + ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \ + MLXSW_SP_SB_BYTES_PER_CELL) + +#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \ + { \ + .pool = _pool, \ + .dir = _dir, \ + .mode = _mode, \ + .size = _size, \ + } + +#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \ + MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \ + MLXSW_REG_SBPR_MODE_DYNAMIC, _size) + +#define 
MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \ + MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \ + MLXSW_REG_SBPR_MODE_DYNAMIC, _size) + +static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = { + MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE), + MLXSW_SP_SB_POOL_INGRESS(1, 0), + MLXSW_SP_SB_POOL_INGRESS(2, 0), + MLXSW_SP_SB_POOL_INGRESS(3, 0), + MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE), + MLXSW_SP_SB_POOL_EGRESS(1, 0), + MLXSW_SP_SB_POOL_EGRESS(2, 0), + MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE), +}; + +#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools) + +static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) +{ + char sbpr_pl[MLXSW_REG_SBPR_LEN]; + int i; + int err; + + for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) { + const struct mlxsw_sp_sb_pool *pool; + + pool = &mlxsw_sp_sb_pools[i]; + mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir, + pool->mode, pool->size); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); + if (err) + return err; + } + return 0; +} + +struct mlxsw_sp_sb_cm { + union { + u8 pg; + u8 tc; + } u; + enum mlxsw_reg_sbcm_dir dir; + u32 min_buff; + u32 max_buff; + u8 pool; +}; + +#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \ + { \ + .u.pg = _pg_tc, \ + .dir = _dir, \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool = _pool, \ + } + +#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \ + MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \ + _min_buff, _max_buff, 0) + +#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \ + MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \ + _min_buff, _max_buff, 0) + +#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \ + MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3) + +static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = { + MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8), + MLXSW_SP_SB_CM_INGRESS(1, 0, 0), + MLXSW_SP_SB_CM_INGRESS(2, 0, 0), + MLXSW_SP_SB_CM_INGRESS(3, 0, 0), + MLXSW_SP_SB_CM_INGRESS(4, 0, 0), + MLXSW_SP_SB_CM_INGRESS(5, 0, 0), + MLXSW_SP_SB_CM_INGRESS(6, 0, 0), + MLXSW_SP_SB_CM_INGRESS(7, 0, 0), + MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff), + MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(8, 0, 0), + MLXSW_SP_SB_CM_EGRESS(9, 0, 0), + MLXSW_SP_SB_CM_EGRESS(10, 0, 0), + MLXSW_SP_SB_CM_EGRESS(11, 0, 0), + MLXSW_SP_SB_CM_EGRESS(12, 0, 0), + MLXSW_SP_SB_CM_EGRESS(13, 0, 0), + MLXSW_SP_SB_CM_EGRESS(14, 0, 0), + MLXSW_SP_SB_CM_EGRESS(15, 0, 0), + MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff), +}; + +#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms) + +static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8), + 
MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31), +}; + +#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \ + ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms) + +static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const struct mlxsw_sp_sb_cm *cms, + size_t cms_len) +{ + char sbcm_pl[MLXSW_REG_SBCM_LEN]; + int i; + int err; + + for (i = 0; i < cms_len; i++) { + const struct mlxsw_sp_sb_cm *cm; + + cm = &cms[i]; + mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir, + cm->min_buff, cm->max_buff, cm->pool); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); + if (err) + return err; + } + return 0; +} + +static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, mlxsw_sp_sb_cms, + MLXSW_SP_SB_CMS_LEN); +} + +static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) +{ + return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms, + MLXSW_SP_CPU_PORT_SB_MCS_LEN); +} + +struct mlxsw_sp_sb_pm { + u8 pool; + enum mlxsw_reg_sbpm_dir dir; + u32 min_buff; + u32 max_buff; +}; + +#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \ + { \ + .pool = _pool, \ + .dir = _dir, \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + } + +#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \ + MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \ + _min_buff, _max_buff) + +#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \ + MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \ + _min_buff, _max_buff) + +static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { + MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff), + MLXSW_SP_SB_PM_INGRESS(1, 0, 0), + MLXSW_SP_SB_PM_INGRESS(2, 0, 0), + MLXSW_SP_SB_PM_INGRESS(3, 0, 0), + MLXSW_SP_SB_PM_EGRESS(0, 0, 7), + MLXSW_SP_SB_PM_EGRESS(1, 0, 0), + MLXSW_SP_SB_PM_EGRESS(2, 0, 0), + MLXSW_SP_SB_PM_EGRESS(3, 0, 0), +}; + +#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms) + +static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + char sbpm_pl[MLXSW_REG_SBPM_LEN]; + int i; + int err; + + for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) { + const struct mlxsw_sp_sb_pm *pm; + + pm = &mlxsw_sp_sb_pms[i]; + mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port, + pm->pool, pm->dir, + pm->min_buff, pm->max_buff); + err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, + MLXSW_REG(sbpm), sbpm_pl); + if (err) + return err; + } + return 0; +} + +struct mlxsw_sp_sb_mm { + u8 prio; + u32 min_buff; + u32 max_buff; + u8 pool; +}; + +#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \ + { \ + .prio = _prio, \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool = _pool, \ + } + +static const struct 
mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { + MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), +}; + +#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) + +static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) +{ + char sbmm_pl[MLXSW_REG_SBMM_LEN]; + int i; + int err; + + for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) { + const struct mlxsw_sp_sb_mm *mc; + + mc = &mlxsw_sp_sb_mms[i]; + mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff, + mc->max_buff, mc->pool); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl); + if (err) + return err; + } + return 0; +} + +int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + err = mlxsw_sp_sb_pools_init(mlxsw_sp); + if (err) + return err; + err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); + if (err) + return err; + err = mlxsw_sp_sb_mms_init(mlxsw_sp); + + return err; +} + +int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = mlxsw_sp_port_pb_init(mlxsw_sp_port); + if (err) + return err; + err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port); + if (err) + return err; + err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c new file mode 100644 index 000000000000..c39b7a188726 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -0,0 +1,863 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/skbuff.h> +#include <linux/if_vlan.h> +#include <linux/if_bridge.h> +#include <linux/workqueue.h> +#include <linux/jiffies.h> +#include <net/switchdev.h> + +#include "spectrum.h" +#include "core.h" +#include "reg.h" + +static int mlxsw_sp_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac); + memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac, + attr->u.ppid.id_len); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + attr->u.brport_flags = + (mlxsw_sp_port->learning ? BR_LEARNING : 0) | + (mlxsw_sp_port->learning_sync ? 
BR_LEARNING_SYNC : 0); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, + u8 state) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + enum mlxsw_reg_spms_state spms_state; + char *spms_pl; + u16 vid; + int err; + + switch (state) { + case BR_STATE_DISABLED: /* fall-through */ + case BR_STATE_FORWARDING: + spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; + break; + case BR_STATE_LISTENING: /* fall-through */ + case BR_STATE_LEARNING: + spms_state = MLXSW_REG_SPMS_STATE_LEARNING; + break; + case BR_STATE_BLOCKING: + spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; + break; + default: + BUG(); + } + + spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); + if (!spms_pl) + return -ENOMEM; + mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); + + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); + kfree(spms_pl); + return err; +} + +static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + u8 state) +{ + if (switchdev_trans_ph_prepare(trans)) + return 0; + + mlxsw_sp_port->stp_state = state; + return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); +} + +static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + unsigned long brport_flags) +{ + if (switchdev_trans_ph_prepare(trans)) + return 0; + + mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0; + mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0; + return 0; +} + +static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time) +{ + char sfdat_pl[MLXSW_REG_SFDAT_LEN]; + int err; + + mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl); + if (err) + return err; + mlxsw_sp->ageing_time = ageing_time; + return 0; +} + +static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + unsigned long ageing_jiffies) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); +} + +static int mlxsw_sp_port_attr_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans, + attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans, + attr->u.ageing_time); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char spvid_pl[MLXSW_REG_SPVID_LEN]; + + mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); +} + +static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) +{ + char 
sfmr_pl[MLXSW_REG_SFMR_LEN]; + int err; + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); + + if (err) + return err; + + set_bit(fid, mlxsw_sp->active_fids); + return 0; +} + +static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid) +{ + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + clear_bit(fid, mlxsw_sp->active_fids); + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, + fid, fid); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) +{ + enum mlxsw_reg_svfa_mt mt; + + if (mlxsw_sp_port->nr_vfids) + mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + else + mt = MLXSW_REG_SVFA_MT_VID_TO_FID; + + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid); +} + +static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) +{ + enum mlxsw_reg_svfa_mt mt; + + if (!mlxsw_sp_port->nr_vfids) + return 0; + + mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid); +} + +static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid, bool set, bool only_uc) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *sftr_pl; + int err; + + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); + if (!sftr_pl) + return -ENOMEM; + + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid, + MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0, + mlxsw_sp_port->local_port, set); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + if (err) + goto buffer_out; + + /* Flooding control allows one to decide whether a given port will + * flood unicast traffic for which there is no FDB entry. + */ + if (only_uc) + goto buffer_out; + + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid, + MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0, + mlxsw_sp_port->local_port, set); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + +buffer_out: + kfree(sftr_pl); + return err; +} + +static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin, + u16 vid_end) +{ + u16 vid; + int err; + + for (vid = vid_begin; vid <= vid_end; vid++) { + err = mlxsw_sp_port_add_vid(dev, 0, vid); + if (err) + goto err_port_add_vid; + } + return 0; + +err_port_add_vid: + for (vid--; vid >= vid_begin; vid--) + mlxsw_sp_port_kill_vid(dev, 0, vid); + return err; +} + +static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, + bool flag_untagged, bool flag_pvid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + enum mlxsw_reg_svfa_mt mt; + u16 vid, vid_e; + int err; + + /* In case this is invoked with BRIDGE_FLAGS_SELF and port is + * not bridged, then packets ingressing through the port with + * the specified VIDs will be directed to CPU. + */ + if (!mlxsw_sp_port->bridged) + return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end); + + for (vid = vid_begin; vid <= vid_end; vid++) { + if (!test_bit(vid, mlxsw_sp->active_fids)) { + err = mlxsw_sp_fid_create(mlxsw_sp, vid); + if (err) { + netdev_err(dev, "Failed to create FID=%d\n", + vid); + return err; + } + + /* When creating a FID, we set a VID to FID mapping + * regardless of the port's mode. 
+ */ + mt = MLXSW_REG_SVFA_MT_VID_TO_FID; + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, + true, vid, vid); + if (err) { + netdev_err(dev, "Failed to create FID=VID=%d mapping\n", + vid); + return err; + } + } + + /* Set FID mapping according to port's mode */ + err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid); + if (err) { + netdev_err(dev, "Failed to map FID=%d", vid); + return err; + } + + err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, true, + false); + if (err) { + netdev_err(dev, "Failed to set flooding for FID=%d", + vid); + return err; + } + } + + for (vid = vid_begin; vid <= vid_end; + vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { + vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), + vid_end); + + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true, + flag_untagged); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n", + vid, vid_e); + return err; + } + } + + vid = vid_begin; + if (flag_pvid && mlxsw_sp_port->pvid != vid) { + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n", + vid); + return err; + } + mlxsw_sp_port->pvid = vid; + } + + /* Changing activity bits only if HW operation succeded */ + for (vid = vid_begin; vid <= vid_end; vid++) + set_bit(vid, mlxsw_sp_port->active_vlans); + + return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, + mlxsw_sp_port->stp_state); +} + +static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + return __mlxsw_sp_port_vlans_add(mlxsw_sp_port, + vlan->vid_begin, vlan->vid_end, + untagged_flag, pvid_flag); +} + +static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port, + const char *mac, u16 vid, bool adding, + bool dynamic) +{ + enum mlxsw_reg_sfd_rec_policy policy; + enum mlxsw_reg_sfd_op op; + char *sfd_pl; + int err; + + if (!vid) + vid = mlxsw_sp_port->pvid; + + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; + op = adding ? 
MLXSW_REG_SFD_OP_WRITE_EDIT : + MLXSW_REG_SFD_OP_WRITE_REMOVE; + mlxsw_reg_sfd_pack(sfd_pl, op, 0); + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, + mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP, + mlxsw_sp_port->local_port); + err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd), + sfd_pl); + kfree(sfd_pl); + + return err; +} + +static int +mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + if (switchdev_trans_ph_prepare(trans)) + return 0; + + return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, + true, false); +} + +static int mlxsw_sp_port_obj_add(struct net_device *dev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), + trans); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_FDB(obj), + trans); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin, + u16 vid_end) +{ + u16 vid; + int err; + + for (vid = vid_begin; vid <= vid_end; vid++) { + err = mlxsw_sp_port_kill_vid(dev, 0, vid); + if (err) + return err; + } + + return 0; +} + +static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, bool init) +{ + struct net_device *dev = mlxsw_sp_port->dev; + u16 vid, vid_e; + int err; + + /* In case this is invoked with BRIDGE_FLAGS_SELF and port is + * not bridged, then prevent packets ingressing through the + * port with the specified VIDs from being trapped to CPU. 
+ */ + if (!init && !mlxsw_sp_port->bridged) + return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end); + + for (vid = vid_begin; vid <= vid_end; + vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { + vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), + vid_end); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false, + false); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n", + vid, vid_e); + return err; + } + } + + if ((mlxsw_sp_port->pvid >= vid_begin) && + (mlxsw_sp_port->pvid <= vid_end)) { + /* Default VLAN is always 1 */ + mlxsw_sp_port->pvid = 1; + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, + mlxsw_sp_port->pvid); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n", + vid); + return err; + } + } + + if (init) + goto out; + + for (vid = vid_begin; vid <= vid_end; vid++) { + err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, false, + false); + if (err) { + netdev_err(dev, "Failed to clear flooding for FID=%d", + vid); + return err; + } + + /* Remove FID mapping in case of Virtual mode */ + err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid); + if (err) { + netdev_err(dev, "Failed to unmap FID=%d", vid); + return err; + } + } + +out: + /* Changing activity bits only if HW operation succeded */ + for (vid = vid_begin; vid <= vid_end; vid++) + clear_bit(vid, mlxsw_sp_port->active_vlans); + + return 0; +} + +static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_vlan *vlan) +{ + return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, + vlan->vid_begin, vlan->vid_end, false); +} + +static int +mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_fdb *fdb) +{ + return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, + false, false); +} + +static int mlxsw_sp_port_obj_del(struct net_device *dev, + const struct switchdev_obj *obj) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = mlxsw_sp_port_vlans_del(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_FDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb) +{ + char *sfd_pl; + char mac[ETH_ALEN]; + u16 vid; + u8 local_port; + u8 num_rec; + int stored_err = 0; + int i; + int err; + + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); + do { + mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT); + err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core, + MLXSW_REG(sfd), sfd_pl); + if (err) + goto out; + + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); + + /* Even in case of error, we have to run the dump to the end + * so the session in firmware is finished. 
+ */ + if (stored_err) + continue; + + for (i = 0; i < num_rec; i++) { + switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) { + case MLXSW_REG_SFD_REC_TYPE_UNICAST: + mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid, + &local_port); + if (local_port == mlxsw_sp_port->local_port) { + ether_addr_copy(fdb->addr, mac); + fdb->ndm_state = NUD_REACHABLE; + fdb->vid = vid; + err = cb(&fdb->obj); + if (err) + stored_err = err; + } + } + } + } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); + +out: + kfree(sfd_pl); + return stored_err ? stored_err : err; +} + +static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb) +{ + u16 vid; + int err = 0; + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { + vlan->flags = 0; + if (vid == mlxsw_sp_port->pvid) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + vlan->vid_begin = vid; + vlan->vid_end = vid; + err = cb(&vlan->obj); + if (err) + break; + } + return err; +} + +static int mlxsw_sp_port_obj_dump(struct net_device *dev, + struct switchdev_obj *obj, + switchdev_obj_dump_cb_t *cb) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), cb); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_FDB(obj), cb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { + .switchdev_port_attr_get = mlxsw_sp_port_attr_get, + .switchdev_port_attr_set = mlxsw_sp_port_attr_set, + .switchdev_port_obj_add = mlxsw_sp_port_obj_add, + .switchdev_port_obj_del = mlxsw_sp_port_obj_del, + .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, +}; + +static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, + char *sfn_pl, int rec_index, + bool adding) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + char mac[ETH_ALEN]; + u8 local_port; + u16 vid; + int err; + + mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port); + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n"); + return; + } + + err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid, + adding && mlxsw_sp_port->learning, true); + if (err) { + if (net_ratelimit()) + netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); + return; + } + + if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) { + struct switchdev_notifier_fdb_info info; + unsigned long notifier_type; + + info.addr = mac; + info.vid = vid; + notifier_type = adding ? 
SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; + call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev, + &info.info); + } +} + +static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, + char *sfn_pl, int rec_index) +{ + switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) { + case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC: + mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, + rec_index, true); + break; + case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC: + mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, + rec_index, false); + break; + } +} + +static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp) +{ + schedule_delayed_work(&mlxsw_sp->fdb_notify.dw, + msecs_to_jiffies(mlxsw_sp->fdb_notify.interval)); +} + +static void mlxsw_sp_fdb_notify_work(struct work_struct *work) +{ + struct mlxsw_sp *mlxsw_sp; + char *sfn_pl; + u8 num_rec; + int i; + int err; + + sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL); + if (!sfn_pl) + return; + + mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); + + do { + mlxsw_reg_sfn_pack(sfn_pl); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); + if (err) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); + break; + } + num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); + for (i = 0; i < num_rec; i++) + mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); + + } while (num_rec); + + kfree(sfn_pl); + mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); +} + +static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); + return err; + } + INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work); + mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; + mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); + return 0; +} + +static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) +{ + cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw); +} + +static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp) +{ + u16 fid; + + for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID) + mlxsw_sp_fid_destroy(mlxsw_sp, fid); +} + +int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) +{ + return mlxsw_sp_fdb_init(mlxsw_sp); +} + +void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_fdb_fini(mlxsw_sp); + mlxsw_sp_fids_fini(mlxsw_sp); +} + +int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + /* Allow only untagged packets to ingress and tag them internally + * with VID 1. + */ + mlxsw_sp_port->pvid = 1; + err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true); + if (err) { + netdev_err(dev, "Unable to init VLANs\n"); + return err; + } + + /* Add implicit VLAN interface in the device, so that untagged + * packets will be classified to the default vFID. 
+ */ + err = mlxsw_sp_port_add_vid(dev, 0, 1); + if (err) + netdev_err(dev, "Failed to configure default vFID\n"); + + return err; +} + +void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; +} + +void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index d448431bbc83..50e29c4879db 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -57,13 +57,11 @@ static const char mlxsw_sx_driver_version[] = "1.0"; struct mlxsw_sx_port; -#define MLXSW_SW_HW_ID_LEN 6 - struct mlxsw_sx { struct mlxsw_sx_port **ports; struct mlxsw_core *core; const struct mlxsw_bus_info *bus_info; - u8 hw_id[MLXSW_SW_HW_ID_LEN]; + u8 hw_id[ETH_ALEN]; }; struct mlxsw_sx_port_pcpu_stats { @@ -925,7 +923,8 @@ static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port, spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); if (!spms_pl) return -ENOMEM; - mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state); + mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port); + mlxsw_reg_spms_vid_pack(spms_pl, vid, state); err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl); kfree(spms_pl); return err; @@ -1069,9 +1068,9 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) return 0; err_register_netdev: -err_port_admin_status_set: err_port_mac_learning_mode_set: err_port_stp_state_set: +err_port_admin_status_set: err_port_mtu_set: err_port_speed_set: err_port_swid_set: @@ -1178,8 +1177,7 @@ static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx, if (err) return err; - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, - MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id); + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); if (err) goto err_event_trap_set; @@ -1212,9 +1210,8 @@ static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port, struct mlxsw_sx_port_pcpu_stats *pcpu_stats; if (unlikely(!mlxsw_sx_port)) { - if (net_ratelimit()) - dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n", - local_port); + dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n", + local_port); return; } @@ -1316,6 +1313,11 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx) if (err) return err; + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; + for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { err = mlxsw_core_rx_listener_register(mlxsw_sx->core, &mlxsw_sx_rx_listener[i], @@ -1324,7 +1326,6 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx) goto err_rx_listener_register; mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, - MLXSW_REG_HTGT_TRAP_GROUP_RX, mlxsw_sx_rx_listener[i].trap_id); err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); if (err) @@ -1339,7 +1340,6 @@ err_rx_trap_set: err_rx_listener_register: for (i--; i >= 0; i--) { mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, - MLXSW_REG_HTGT_TRAP_GROUP_RX, mlxsw_sx_rx_listener[i].trap_id); mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); @@ -1357,7 +1357,6 @@ static void mlxsw_sx_traps_fini(struct mlxsw_sx 
*mlxsw_sx) for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, - MLXSW_REG_HTGT_TRAP_GROUP_RX, mlxsw_sx_rx_listener[i].trap_id); mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); @@ -1371,25 +1370,15 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx) { char sfgc_pl[MLXSW_REG_SFGC_LEN]; char sgcr_pl[MLXSW_REG_SGCR_LEN]; - char *smid_pl; char *sftr_pl; int err; - /* Due to FW bug, we must configure SMID. */ - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) - return -ENOMEM; - mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID); - err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); - if (err) - return err; - /* Configure a flooding table, which includes only CPU port. */ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); if (!sftr_pl) return -ENOMEM; - mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0); + mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0, + MLXSW_PORT_CPU_PORT, true); err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl); kfree(sftr_pl); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h index 06fc46c78a0b..fdf94720ca62 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h +++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h @@ -38,6 +38,7 @@ #define MLXSW_TXHDR_LEN 0x10 #define MLXSW_TXHDR_VERSION_0 0 +#define MLXSW_TXHDR_VERSION_1 1 enum { MLXSW_TXHDR_ETH_CTL, diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index e1329d9c2acc..bf08ce2baf8d 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -617,10 +617,10 @@ static int encx24j600_hw_init(struct encx24j600_priv *priv) (eidled & REVID_MASK) >> REVID_SHIFT); /* PHY Leds: link status, - * LEDA: Link + transmit/receive events - * LEDB: Link State + colision events + * LEDA: Link State + collision events + * LEDB: Link State + transmit/receive events */ - encx24j600_update_reg(priv, EIDLED, 0xbc00, 0xbc00); + encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00); /* Loopback disabled */ encx24j600_write_reg(priv, MACON1, 0x9); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 2d1b94274079..9ba975853ec6 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -5389,8 +5389,6 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev, strlcpy(info->driver, s2io_driver_name, sizeof(info->driver)); strlcpy(info->version, s2io_driver_version, sizeof(info->version)); strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); - info->regdump_len = XENA_REG_SPACE; - info->eedump_len = XENA_EEPROM_SPACE; } /** diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c index be916eb2f2e7..9a2967016c18 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c @@ -105,10 +105,6 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev, strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info)); - info->regdump_len = sizeof(struct vxge_hw_vpath_reg) - * vdev->no_of_vpath; - - info->n_stats = STAT_LEN; } /** diff --git 
a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 66fd868152e5..b159ef8303cc 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -476,13 +476,12 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) mac[5] = tmp >> 8; } -static void __lpc_eth_clock_enable(struct netdata_local *pldat, - bool enable) +static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable) { if (enable) - clk_enable(pldat->clk); + clk_prepare_enable(pldat->clk); else - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); } static void __lpc_params_setup(struct netdata_local *pldat) @@ -1494,7 +1493,7 @@ err_out_free_irq: err_out_iounmap: iounmap(pldat->net_base); err_out_disable_clocks: - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); err_out_free_dev: free_netdev(ndev); @@ -1519,7 +1518,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) iounmap(pldat->net_base); mdiobus_unregister(pldat->mii_bus); mdiobus_free(pldat->mii_bus); - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); free_netdev(ndev); @@ -1540,7 +1539,7 @@ static int lpc_eth_drv_suspend(struct platform_device *pdev, if (netif_running(ndev)) { netif_device_detach(ndev); __lpc_eth_shutdown(pldat); - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); /* * Reset again now clock is disable to be sure diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 7bf9c028d8d7..c177c7cec13b 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -1344,10 +1344,6 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev, strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); - info->n_stats = 0; - info->testinfo_len = 0; - info->regdump_len = 0; - info->eedump_len = 0; } static int octeon_mgmt_get_settings(struct net_device *netdev, diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index f6fcf7450352..b19be7c6c1f4 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -164,7 +164,6 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = pch_gbe_get_regs_len(netdev); } /** diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index f1f0108c275d..30a6f246dfc9 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -91,4 +91,15 @@ config NETXEN_NIC ---help--- This enables the support for NetXen's Gigabit Ethernet card. +config QED + tristate "QLogic QED 25/40/100Gb core driver" + depends on PCI + ---help--- + This enables the support for ... + +config QEDE + tristate "QLogic QED 25/40/100Gb Ethernet NIC" + depends on QED + ---help--- + This enables the support for ... 
endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile index b2a283d9ae60..cee90e05beb8 100644 --- a/drivers/net/ethernet/qlogic/Makefile +++ b/drivers/net/ethernet/qlogic/Makefile @@ -6,3 +6,5 @@ obj-$(CONFIG_QLA3XXX) += qla3xxx.o obj-$(CONFIG_QLCNIC) += qlcnic/ obj-$(CONFIG_QLGE) += qlge/ obj-$(CONFIG_NETXEN_NIC) += netxen/ +obj-$(CONFIG_QED) += qed/ +obj-$(CONFIG_QEDE)+= qede/ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index 87e073c6e291..f9034467736c 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -93,8 +93,6 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = NETXEN_NIC_REGS_LEN; - drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev); } static int diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile new file mode 100644 index 000000000000..5c2fd57236fe --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_QED) := qed.o + +qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ + qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h new file mode 100644 index 000000000000..ac17d8669b1a --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -0,0 +1,496 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_H +#define _QED_H + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/workqueue.h> +#include <linux/zlib.h> +#include <linux/hashtable.h> +#include <linux/qed/qed_if.h> +#include "qed_hsi.h" + +extern const struct qed_common_ops qed_common_ops_pass; +#define DRV_MODULE_VERSION "8.4.0.0" + +#define MAX_HWFNS_PER_DEVICE (4) +#define NAME_SIZE 16 +#define VER_SIZE 16 + +/* cau states */ +enum qed_coalescing_mode { + QED_COAL_MODE_DISABLE, + QED_COAL_MODE_ENABLE +}; + +struct qed_eth_cb_ops; +struct qed_dev_info; + +/* helpers */ +static inline u32 qed_db_addr(u32 cid, u32 DEMS) +{ + u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | + FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid); + + return db_addr; +} + +#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \ + ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \ + ~((1 << (p_hwfn->cdev->cache_shift)) - 1)) + +#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++) + +#define D_TRINE(val, cond1, cond2, true1, true2, def) \ + (val == (cond1) ? true1 : \ + (val == (cond2) ? true2 : def)) + +/* forward */ +struct qed_ptt_pool; +struct qed_spq; +struct qed_sb_info; +struct qed_sb_attn_info; +struct qed_cxt_mngr; +struct qed_sb_sp_info; +struct qed_mcp_info; + +struct qed_rt_data { + u32 init_val; + bool b_valid; +}; + +/* The PCI personality is not quite synonymous to protocol ID: + * 1. 
All personalities need CORE connections + * 2. The Ethernet personality may support also the RoCE protocol + */ +enum qed_pci_personality { + QED_PCI_ETH, + QED_PCI_DEFAULT /* default in shmem */ +}; + +/* All VFs are symmetric, all counters are PF + all VFs */ +struct qed_qm_iids { + u32 cids; + u32 vf_cids; + u32 tids; +}; + +enum QED_RESOURCES { + QED_SB, + QED_L2_QUEUE, + QED_VPORT, + QED_RSS_ENG, + QED_PQ, + QED_RL, + QED_MAC, + QED_VLAN, + QED_ILT, + QED_MAX_RESC, +}; + +enum QED_FEATURE { + QED_PF_L2_QUE, + QED_MAX_FEATURES, +}; + +enum QED_PORT_MODE { + QED_PORT_MODE_DE_2X40G, + QED_PORT_MODE_DE_2X50G, + QED_PORT_MODE_DE_1X100G, + QED_PORT_MODE_DE_4X10G_F, + QED_PORT_MODE_DE_4X10G_E, + QED_PORT_MODE_DE_4X20G, + QED_PORT_MODE_DE_1X40G, + QED_PORT_MODE_DE_2X25G, + QED_PORT_MODE_DE_1X25G +}; + +struct qed_hw_info { + /* PCI personality */ + enum qed_pci_personality personality; + + /* Resource Allocation scheme results */ + u32 resc_start[QED_MAX_RESC]; + u32 resc_num[QED_MAX_RESC]; + u32 feat_num[QED_MAX_FEATURES]; + +#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) +#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) +#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) + + u8 num_tc; + u8 offload_tc; + u8 non_offload_tc; + + u32 concrete_fid; + u16 opaque_fid; + u16 ovlan; + u32 part_num[4]; + + u32 vendor_id; + u32 device_id; + + unsigned char hw_mac_addr[ETH_ALEN]; + + struct qed_igu_info *p_igu_info; + + u32 port_mode; + u32 hw_mode; +}; + +struct qed_hw_cid_data { + u32 cid; + bool b_cid_allocated; + + /* Additional identifiers */ + u16 opaque_fid; + u8 vport_id; +}; + +/* maximun size of read/write commands (HW limit) */ +#define DMAE_MAX_RW_SIZE 0x2000 + +struct qed_dmae_info { + /* Mutex for synchronizing access to functions */ + struct mutex mutex; + + u8 channel; + + dma_addr_t completion_word_phys_addr; + + /* The memory location where the DMAE writes the completion + * value when an operation is finished on this context. 
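Aside on the RESC_START()/RESC_NUM()/FEAT_NUM() accessors defined in qed_hw_info above: each PF owns a contiguous slice of every global resource, and an absolute ID is the PF's start plus a relative index. A stand-alone sketch with invented numbers (the real per-PF counts are computed later in qed_hw_get_resc()):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for one entry of resc_num[]/resc_start[]. */
struct resc_slice {
	uint32_t num;	/* how many entries this PF owns        */
	uint32_t start;	/* first absolute entry owned by the PF */
};

/* Mirrors resc_start[i] = resc_num[i] * rel_pf_id in qed_hw_get_resc(). */
static struct resc_slice slice_for_pf(uint32_t per_pf, uint32_t rel_pf_id)
{
	return (struct resc_slice){ .num = per_pf, .start = per_pf * rel_pf_id };
}

static uint32_t abs_id(const struct resc_slice *s, uint32_t rel)
{
	assert(rel < s->num);	/* a relative index must stay inside the slice */
	return s->start + rel;
}

int main(void)
{
	/* e.g. 64 L2 queues per PF, this function is relative PF 2 */
	struct resc_slice l2 = slice_for_pf(64, 2);

	printf("queue 5 of PF2 -> absolute queue %u\n", abs_id(&l2, 5)); /* 133 */
	return 0;
}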
+ */ + u32 *p_completion_word; + + dma_addr_t intermediate_buffer_phys_addr; + + /* An intermediate buffer for DMAE operations that use virtual + * addresses - data is DMA'd to/from this buffer and then + * memcpy'd to/from the virtual address + */ + u32 *p_intermediate_buffer; + + dma_addr_t dmae_cmd_phys_addr; + struct dmae_cmd *p_dmae_cmd; +}; + +struct qed_qm_info { + struct init_qm_pq_params *qm_pq_params; + struct init_qm_vport_params *qm_vport_params; + struct init_qm_port_params *qm_port_params; + u16 start_pq; + u8 start_vport; + u8 pure_lb_pq; + u8 offload_pq; + u8 pure_ack_pq; + u8 vf_queues_offset; + u16 num_pqs; + u16 num_vf_pqs; + u8 num_vports; + u8 max_phys_tcs_per_port; + bool pf_rl_en; + bool pf_wfq_en; + bool vport_rl_en; + bool vport_wfq_en; + u8 pf_wfq; + u32 pf_rl; +}; + +struct storm_stats { + u32 address; + u32 len; +}; + +struct qed_storm_stats { + struct storm_stats mstats; + struct storm_stats pstats; + struct storm_stats tstats; + struct storm_stats ustats; +}; + +struct qed_fw_data { + struct fw_ver_info *fw_ver_info; + const u8 *modes_tree_buf; + union init_op *init_ops; + const u32 *arr_data; + u32 init_ops_size; +}; + +struct qed_simd_fp_handler { + void *token; + void (*func)(void *); +}; + +struct qed_hwfn { + struct qed_dev *cdev; + u8 my_id; /* ID inside the PF */ +#define IS_LEAD_HWFN(edev) (!((edev)->my_id)) + u8 rel_pf_id; /* Relative to engine*/ + u8 abs_pf_id; +#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1) + u8 port_id; + bool b_active; + + u32 dp_module; + u8 dp_level; + char name[NAME_SIZE]; + + bool first_on_engine; + bool hw_init_done; + + /* BAR access */ + void __iomem *regview; + void __iomem *doorbells; + u64 db_phys_addr; + unsigned long db_size; + + /* PTT pool */ + struct qed_ptt_pool *p_ptt_pool; + + /* HW info */ + struct qed_hw_info hw_info; + + /* rt_array (for init-tool) */ + struct qed_rt_data *rt_data; + + /* SPQ */ + struct qed_spq *p_spq; + + /* EQ */ + struct qed_eq *p_eq; + + /* Consolidate Q*/ + struct qed_consq *p_consq; + + /* Slow-Path definitions */ + struct tasklet_struct *sp_dpc; + bool b_sp_dpc_enabled; + + struct qed_ptt *p_main_ptt; + struct qed_ptt *p_dpc_ptt; + + struct qed_sb_sp_info *p_sp_sb; + struct qed_sb_attn_info *p_sb_attn; + + /* Protocol related */ + struct qed_pf_params pf_params; + + /* Array of sb_info of all status blocks */ + struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD]; + u16 num_sbs; + + struct qed_cxt_mngr *p_cxt_mngr; + + /* Flag indicating whether interrupts are enabled or not*/ + bool b_int_enabled; + + struct qed_mcp_info *mcp_info; + + struct qed_hw_cid_data *p_tx_cids; + struct qed_hw_cid_data *p_rx_cids; + + struct qed_dmae_info dmae_info; + + /* QM init */ + struct qed_qm_info qm_info; + struct qed_storm_stats storm_stats; + + /* Buffer for unzipping firmware data */ + void *unzip_buf; + + struct qed_simd_fp_handler simd_proto_handler[64]; + + struct z_stream_s *stream; +}; + +struct pci_params { + int pm_cap; + + unsigned long mem_start; + unsigned long mem_end; + unsigned int irq; + u8 pf_num; +}; + +struct qed_int_param { + u32 int_mode; + u8 num_vectors; + u8 min_msix_cnt; /* for minimal functionality */ +}; + +struct qed_int_params { + struct qed_int_param in; + struct qed_int_param out; + struct msix_entry *msix_table; + bool fp_initialized; + u8 fp_msix_base; + u8 fp_msix_cnt; +}; + +struct qed_dev { + u32 dp_module; + u8 dp_level; + char name[NAME_SIZE]; + + u8 type; +#define QED_DEV_TYPE_BB_A0 (0 << 0) +#define QED_DEV_TYPE_MASK (0x3) +#define QED_DEV_TYPE_SHIFT (0) + 
+ u16 chip_num; +#define CHIP_NUM_MASK 0xffff +#define CHIP_NUM_SHIFT 16 + + u16 chip_rev; +#define CHIP_REV_MASK 0xf +#define CHIP_REV_SHIFT 12 + + u16 chip_metal; +#define CHIP_METAL_MASK 0xff +#define CHIP_METAL_SHIFT 4 + + u16 chip_bond_id; +#define CHIP_BOND_ID_MASK 0xf +#define CHIP_BOND_ID_SHIFT 0 + + u8 num_engines; + u8 num_ports_in_engines; + u8 num_funcs_in_port; + + u8 path_id; + enum mf_mode mf_mode; +#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF) +#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR) +#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN) + + int pcie_width; + int pcie_speed; + u8 ver_str[VER_SIZE]; + + /* Add MF related configuration */ + u8 mcp_rev; + u8 boot_mode; + + u8 wol; + + u32 int_mode; + enum qed_coalescing_mode int_coalescing_mode; + u8 rx_coalesce_usecs; + u8 tx_coalesce_usecs; + + /* Start Bar offset of first hwfn */ + void __iomem *regview; + void __iomem *doorbells; + u64 db_phys_addr; + unsigned long db_size; + + /* PCI */ + u8 cache_shift; + + /* Init */ + const struct iro *iro_arr; +#define IRO (p_hwfn->cdev->iro_arr) + + /* HW functions */ + u8 num_hwfns; + struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE]; + + u32 drv_type; + + struct qed_eth_stats *reset_stats; + struct qed_fw_data *fw_data; + + u32 mcp_nvm_resp; + + /* Linux specific here */ + struct qede_dev *edev; + struct pci_dev *pdev; + int msg_enable; + + struct pci_params pci_params; + + struct qed_int_params int_params; + + u8 protocol; +#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH) + + /* Callbacks to protocol driver */ + union { + struct qed_common_cb_ops *common; + struct qed_eth_cb_ops *eth; + } protocol_ops; + void *ops_cookie; + + const struct firmware *firmware; +}; + +#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \ + QED_DEV_TYPE_SHIFT) +#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0) +#define QED_IS_BB(dev) (QED_IS_BB_A0(dev)) + +#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB +#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB + +/** + * @brief qed_concrete_to_sw_fid - get the sw function id from + * the concrete value. 
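qed_concrete_to_sw_fid() below relies on GET_FIELD(), which comes from the HSI headers rather than this patch. The usual token-pasting mask/shift idiom it follows looks roughly like the sketch here; the field widths are illustrative only, not the real PXP_CONCRETE_FID_* layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field definitions -- real widths live in the HSI headers. */
#define EX_PFID_MASK	0xf
#define EX_PFID_SHIFT	0
#define EX_PORT_MASK	0x3
#define EX_PORT_SHIFT	4

/* The GET_FIELD()/SET_FIELD() idiom: name##_MASK / name##_SHIFT pairs. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, val) \
	((value) = ((value) & ~((uint64_t)name##_MASK << name##_SHIFT)) | \
		   (((uint64_t)(val) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint32_t concrete = 0;

	SET_FIELD(concrete, EX_PFID, 5);
	SET_FIELD(concrete, EX_PORT, 1);
	printf("pfid=%u port=%u\n",
	       (unsigned)GET_FIELD(concrete, EX_PFID),
	       (unsigned)GET_FIELD(concrete, EX_PORT));	/* pfid=5 port=1 */
	return 0;
}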
+ * + * @param concrete_fid + * + * @return inline u8 + */ +static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, + u32 concrete_fid) +{ + u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); + + return pfid; +} + +#define PURE_LB_TC 8 + +#define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) + +/* Other Linux specific common definitions */ +#define DP_NAME(cdev) ((cdev)->name) + +#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\ + (cdev->regview) + \ + (offset)) + +#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset)) +#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset)) +#define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset)) + +#define DOORBELL(cdev, db_addr, val) \ + writel((u32)val, (void __iomem *)((u8 __iomem *)\ + (cdev->doorbells) + (db_addr))) + +/* Prototypes */ +int qed_fill_dev_info(struct qed_dev *cdev, + struct qed_dev_info *dev_info); +void qed_link_update(struct qed_hwfn *hwfn); +u32 qed_unzip_data(struct qed_hwfn *p_hwfn, + u32 input_len, u8 *input_buf, + u32 max_size, u8 *unzip_buf); + +#define QED_ETH_INTERFACE_VERSION 300 + +#endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c new file mode 100644 index 000000000000..7ccdb46c6764 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -0,0 +1,847 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/log2.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/bitops.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_reg_addr.h" + +/* Max number of connection types in HW (DQ/CDU etc.) */ +#define MAX_CONN_TYPES PROTOCOLID_COMMON +#define NUM_TASK_TYPES 2 +#define NUM_TASK_PF_SEGMENTS 4 + +/* QM constants */ +#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */ + +/* Doorbell-Queue constants */ +#define DQ_RANGE_SHIFT 4 +#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT) + +/* ILT constants */ +#define ILT_DEFAULT_HW_P_SIZE 3 +#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) +#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET + +/* ILT entry structure */ +#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL +#define ILT_ENTRY_PHY_ADDR_SHIFT 0 +#define ILT_ENTRY_VALID_MASK 0x1ULL +#define ILT_ENTRY_VALID_SHIFT 52 +#define ILT_ENTRY_IN_REGS 2 +#define ILT_REG_SIZE_IN_BYTES 4 + +/* connection context union */ +union conn_context { + struct core_conn_context core_ctx; + struct eth_conn_context eth_ctx; +}; + +#define CONN_CXT_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) + +/* PF per protocl configuration object */ +struct qed_conn_type_cfg { + u32 cid_count; + u32 cid_start; +}; + +/* ILT Client configuration, Per connection type (protocol) resources. 
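The ILT constants above fix the default page size at 1 << (3 + 12) = 32 KB and describe a 64-bit table entry holding a valid bit plus a page-aligned physical address shifted down by 12. A stand-alone sketch of the encoding that qed_ilt_init_pf() performs later with SET_FIELD():

#include <stdint.h>
#include <stdio.h>

/* Constants copied from qed_cxt.c above. */
#define ILT_DEFAULT_HW_P_SIZE	3
#define ILT_PAGE_IN_BYTES(s)	(1U << ((s) + 12))
#define ILT_ENTRY_PHY_ADDR_MASK	0x000FFFFFFFFFFFULL
#define ILT_ENTRY_VALID_SHIFT	52

/* Build one 64-bit ILT entry: valid bit plus (physical address >> 12). */
static uint64_t ilt_entry(uint64_t dma_addr)
{
	return (1ULL << ILT_ENTRY_VALID_SHIFT) |
	       ((dma_addr >> 12) & ILT_ENTRY_PHY_ADDR_MASK);
}

int main(void)
{
	/* Default p_size of 3 gives 1 << 15 = 32 KB pages ("32K" in the code). */
	printf("ILT page: %u bytes\n", ILT_PAGE_IN_BYTES(ILT_DEFAULT_HW_P_SIZE));
	printf("entry for 0x12345000: 0x%llx\n",
	       (unsigned long long)ilt_entry(0x12345000ULL));
	return 0;
}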
*/ +#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) +#define CDUC_BLK (0) + +enum ilt_clients { + ILT_CLI_CDUC, + ILT_CLI_QM, + ILT_CLI_MAX +}; + +struct ilt_cfg_pair { + u32 reg; + u32 val; +}; + +struct qed_ilt_cli_blk { + u32 total_size; /* 0 means not active */ + u32 real_size_in_page; + u32 start_line; +}; + +struct qed_ilt_client_cfg { + bool active; + + /* ILT boundaries */ + struct ilt_cfg_pair first; + struct ilt_cfg_pair last; + struct ilt_cfg_pair p_size; + + /* ILT client blocks for PF */ + struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS]; + u32 pf_total_lines; +}; + +/* Per Path - + * ILT shadow table + * Protocol acquired CID lists + * PF start line in ILT + */ +struct qed_dma_mem { + dma_addr_t p_phys; + void *p_virt; + size_t size; +}; + +struct qed_cid_acquired_map { + u32 start_cid; + u32 max_count; + unsigned long *cid_map; +}; + +struct qed_cxt_mngr { + /* Per protocl configuration */ + struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES]; + + /* computed ILT structure */ + struct qed_ilt_client_cfg clients[ILT_CLI_MAX]; + + /* Acquired CIDs */ + struct qed_cid_acquired_map acquired[MAX_CONN_TYPES]; + + /* ILT shadow table */ + struct qed_dma_mem *ilt_shadow; + u32 pf_start_line; +}; + +static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr) +{ + u32 type, pf_cids = 0; + + for (type = 0; type < MAX_CONN_TYPES; type++) + pf_cids += p_mngr->conn_cfg[type].cid_count; + + return pf_cids; +} + +static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, + struct qed_qm_iids *iids) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + int type; + + for (type = 0; type < MAX_CONN_TYPES; type++) + iids->cids += p_mngr->conn_cfg[type].cid_count; + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids); +} + +/* set the iids count per protocol */ +static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type, + u32 cid_count) +{ + struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type]; + + p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN); +} + +static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli, + struct qed_ilt_cli_blk *p_blk, + u32 start_line, u32 total_size, + u32 elem_size) +{ + u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); + + /* verify thatits called only once for each block */ + if (p_blk->total_size) + return; + + p_blk->total_size = total_size; + p_blk->real_size_in_page = 0; + if (elem_size) + p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size; + p_blk->start_line = start_line; +} + +static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn, + struct qed_ilt_client_cfg *p_cli, + struct qed_ilt_cli_blk *p_blk, + u32 *p_line, enum ilt_clients client_id) +{ + if (!p_blk->total_size) + return; + + if (!p_cli->active) + p_cli->first.val = *p_line; + + p_cli->active = true; + *p_line += DIV_ROUND_UP(p_blk->total_size, + p_blk->real_size_in_page); + p_cli->last.val = *p_line - 1; + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "ILT[Client %d] - Lines: [%08x - %08x]. 
Block - Size %08x [Real %08x] Start line %d\n", + client_id, p_cli->first.val, + p_cli->last.val, p_blk->total_size, + p_blk->real_size_in_page, p_blk->start_line); +} + +int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + u32 curr_line, total, pf_cids; + struct qed_qm_iids qm_iids; + + memset(&qm_iids, 0, sizeof(qm_iids)); + + p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "hwfn [%d] - Set context manager starting line to be 0x%08x\n", + p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); + + /* CDUC */ + p_cli = &p_mngr->clients[ILT_CLI_CDUC]; + curr_line = p_mngr->pf_start_line; + p_cli->pf_total_lines = 0; + + /* get the counters for the CDUC and QM clients */ + pf_cids = qed_cxt_cdu_iids(p_mngr); + + p_blk = &p_cli->pf_blks[CDUC_BLK]; + + total = pf_cids * CONN_CXT_SIZE(p_hwfn); + + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total, CONN_CXT_SIZE(p_hwfn)); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + /* QM */ + p_cli = &p_mngr->clients[ILT_CLI_QM]; + p_blk = &p_cli->pf_blks[0]; + + qed_cxt_qm_iids(p_hwfn, &qm_iids); + total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0, + p_hwfn->qm_info.num_pqs, 0); + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n", + qm_iids.cids, p_hwfn->qm_info.num_pqs, total); + + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total * 0x1000, + QM_PQ_ELEMENT_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > + RESC_NUM(p_hwfn, QED_ILT)) { + DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n", + curr_line - p_hwfn->p_cxt_mngr->pf_start_line); + return -EINVAL; + } + + return 0; +} + +#define for_each_ilt_valid_client(pos, clients) \ + for (pos = 0; pos < ILT_CLI_MAX; pos++) + +/* Total number of ILT lines used by this PF */ +static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients) +{ + u32 size = 0; + u32 i; + + for_each_ilt_valid_client(i, ilt_clients) { + if (!ilt_clients[i].active) + continue; + size += (ilt_clients[i].last.val - + ilt_clients[i].first.val + 1); + } + + return size; +} + +static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients; + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 ilt_size, i; + + ilt_size = qed_cxt_ilt_shadow_size(p_cli); + + for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) { + struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i]; + + if (p_dma->p_virt) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_dma->size, p_dma->p_virt, + p_dma->p_phys); + p_dma->p_virt = NULL; + } + kfree(p_mngr->ilt_shadow); +} + +static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, + struct qed_ilt_cli_blk *p_blk, + enum ilt_clients ilt_client, + u32 start_line_offset) +{ + struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; + u32 lines, line, sz_left; + + if (!p_blk->total_size) + return 0; + + sz_left = p_blk->total_size; + lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page); + line = p_blk->start_line + start_line_offset - + p_hwfn->p_cxt_mngr->pf_start_line; + + for (; lines; lines--) { + dma_addr_t p_phys; + void *p_virt; + u32 size; + + size = min_t(u32, sz_left, + 
p_blk->real_size_in_page); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + size, + &p_phys, + GFP_KERNEL); + if (!p_virt) + return -ENOMEM; + memset(p_virt, 0, size); + + ilt_shadow[line].p_phys = p_phys; + ilt_shadow[line].p_virt = p_virt; + ilt_shadow[line].size = size; + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n", + line, (u64)p_phys, p_virt, size); + + sz_left -= size; + line++; + } + + return 0; +} + +static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_ilt_client_cfg *clients = p_mngr->clients; + struct qed_ilt_cli_blk *p_blk; + u32 size, i, j; + int rc; + + size = qed_cxt_ilt_shadow_size(clients); + p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem), + GFP_KERNEL); + if (!p_mngr->ilt_shadow) { + DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n"); + rc = -ENOMEM; + goto ilt_shadow_fail; + } + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "Allocated 0x%x bytes for ilt shadow\n", + (u32)(size * sizeof(struct qed_dma_mem))); + + for_each_ilt_valid_client(i, clients) { + if (!clients[i].active) + continue; + for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { + p_blk = &clients[i].pf_blks[j]; + rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0); + if (rc != 0) + goto ilt_shadow_fail; + } + } + + return 0; + +ilt_shadow_fail: + qed_ilt_shadow_free(p_hwfn); + return rc; +} + +static void qed_cid_map_free(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 type; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + kfree(p_mngr->acquired[type].cid_map); + p_mngr->acquired[type].max_count = 0; + p_mngr->acquired[type].start_cid = 0; + } +} + +static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 start_cid = 0; + u32 type; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; + u32 size; + + if (cid_cnt == 0) + continue; + + size = DIV_ROUND_UP(cid_cnt, + sizeof(unsigned long) * BITS_PER_BYTE) * + sizeof(unsigned long); + p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL); + if (!p_mngr->acquired[type].cid_map) + goto cid_map_fail; + + p_mngr->acquired[type].max_count = cid_cnt; + p_mngr->acquired[type].start_cid = start_cid; + + p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid; + + DP_VERBOSE(p_hwfn, QED_MSG_CXT, + "Type %08x start: %08x count %08x\n", + type, p_mngr->acquired[type].start_cid, + p_mngr->acquired[type].max_count); + start_cid += cid_cnt; + } + + return 0; + +cid_map_fail: + qed_cid_map_free(p_hwfn); + return -ENOMEM; +} + +int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr; + u32 i; + + p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC); + if (!p_mngr) { + DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n"); + return -ENOMEM; + } + + /* Initialize ILT client registers */ + p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT); + p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT); + p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE); + + p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT); + p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT); + p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE); + + /* default ILT page size for all clients is 32K */ + for (i = 0; i < ILT_CLI_MAX; i++) + p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; + + /* Set 
the cxt mangr pointer priori to further allocations */ + p_hwfn->p_cxt_mngr = p_mngr; + + return 0; +} + +int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn) +{ + int rc; + + /* Allocate the ILT shadow table */ + rc = qed_ilt_shadow_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n"); + goto tables_alloc_fail; + } + + /* Allocate and initialize the acquired cids bitmaps */ + rc = qed_cid_map_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n"); + goto tables_alloc_fail; + } + + return 0; + +tables_alloc_fail: + qed_cxt_mngr_free(p_hwfn); + return rc; +} + +void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn->p_cxt_mngr) + return; + + qed_cid_map_free(p_hwfn); + qed_ilt_shadow_free(p_hwfn); + kfree(p_hwfn->p_cxt_mngr); + + p_hwfn->p_cxt_mngr = NULL; +} + +void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + int type; + + /* Reset acquired cids */ + for (type = 0; type < MAX_CONN_TYPES; type++) { + u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; + + if (cid_cnt == 0) + continue; + + memset(p_mngr->acquired[type].cid_map, 0, + DIV_ROUND_UP(cid_cnt, + sizeof(unsigned long) * BITS_PER_BYTE) * + sizeof(unsigned long)); + } +} + +/* CDU Common */ +#define CDUC_CXT_SIZE_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT + +#define CDUC_CXT_SIZE_MASK \ + (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT) + +#define CDUC_BLOCK_WASTE_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT + +#define CDUC_BLOCK_WASTE_MASK \ + (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT) + +#define CDUC_NCIB_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT + +#define CDUC_NCIB_MASK \ + (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT) + +static void qed_cdu_init_common(struct qed_hwfn *p_hwfn) +{ + u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0; + + /* CDUC - connection configuration */ + page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; + cxt_size = CONN_CXT_SIZE(p_hwfn); + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size); + SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste); + SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); +} + +void qed_qm_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_pf_rt_init_params params; + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + struct qed_qm_iids iids; + + memset(&iids, 0, sizeof(iids)); + qed_cxt_qm_iids(p_hwfn, &iids); + + memset(¶ms, 0, sizeof(params)); + params.port_id = p_hwfn->port_id; + params.pf_id = p_hwfn->rel_pf_id; + params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; + params.is_first_pf = p_hwfn->first_on_engine; + params.num_pf_cids = iids.cids; + params.start_pq = qm_info->start_pq; + params.num_pf_pqs = qm_info->num_pqs; + params.start_vport = qm_info->num_vports; + params.pf_wfq = qm_info->pf_wfq; + params.pf_rl = qm_info->pf_rl; + params.pq_params = qm_info->qm_pq_params; + params.vport_params = qm_info->qm_vport_params; + + qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, ¶ms); +} + +/* CM PF */ +static int qed_cm_init_pf(struct qed_hwfn *p_hwfn) +{ + union qed_qm_pq_params pq_params; + u16 pq; + + /* XCM pure-LB queue */ + memset(&pq_params, 0, sizeof(pq_params)); + pq_params.core.tc = LB_TC; + pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); + 
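qed_cdu_init_common() above derives how many connection contexts fit in one ILT page and how many bytes of each page are wasted. A small worked example, assuming the 32 KB default page and a purely illustrative 320-byte context size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Mirrors qed_cdu_init_common(): contexts per page and per-page waste.
	 * The 32 KB page matches ILT_DEFAULT_HW_P_SIZE; the 320-byte context
	 * size is illustrative, not the real CONN_CXT_SIZE().
	 */
	uint32_t page_sz = 32 * 1024;
	uint32_t cxt_size = 320;
	uint32_t elems_per_page = page_sz / cxt_size;
	uint32_t block_waste = page_sz - elems_per_page * cxt_size;

	printf("%u contexts per page, %u bytes wasted\n",
	       elems_per_page, block_waste);	/* 102 per page, 128 wasted */
	return 0;
}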
STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq); + + return 0; +} + +/* DQ PF */ +static void qed_dq_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 dq_pf_max_cid = 0; + + dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); + + /* 5 - PF */ + dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); +} + +static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *ilt_clients; + int i; + + ilt_clients = p_hwfn->p_cxt_mngr->clients; + for_each_ilt_valid_client(i, ilt_clients) { + if (!ilt_clients[i].active) + continue; + STORE_RT_REG(p_hwfn, + ilt_clients[i].first.reg, + ilt_clients[i].first.val); + STORE_RT_REG(p_hwfn, + ilt_clients[i].last.reg, + ilt_clients[i].last.val); + STORE_RT_REG(p_hwfn, + ilt_clients[i].p_size.reg, + ilt_clients[i].p_size.val); + } +} + +/* ILT (PSWRQ2) PF */ +static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *clients; + struct qed_cxt_mngr *p_mngr; + struct qed_dma_mem *p_shdw; + u32 line, rt_offst, i; + + qed_ilt_bounds_init(p_hwfn); + + p_mngr = p_hwfn->p_cxt_mngr; + p_shdw = p_mngr->ilt_shadow; + clients = p_hwfn->p_cxt_mngr->clients; + + for_each_ilt_valid_client(i, clients) { + if (!clients[i].active) + continue; + + /** Client's 1st val and RT array are absolute, ILT shadows' + * lines are relative. 
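qed_dq_init_pf() above programs the DORQ max-ICID registers with a running total of the per-protocol CID counts scaled down by DQ_RANGE_SHIFT. A stand-alone sketch with invented per-type counts:

#include <stdint.h>
#include <stdio.h>

#define DQ_RANGE_SHIFT 4	/* same constant as in qed_cxt.c */

int main(void)
{
	/* Running total of (cid_count >> DQ_RANGE_SHIFT), one register per
	 * connection type; the per-type counts below are illustrative only.
	 */
	uint32_t cid_count[6] = { 32, 4096, 0, 0, 0, 0 };
	uint32_t max_icid = 0;

	for (int i = 0; i < 6; i++) {
		max_icid += cid_count[i] >> DQ_RANGE_SHIFT;
		printf("DORQ PF_MAX_ICID_%d = %u\n", i, max_icid);
	}
	return 0;
}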
+ */ + line = clients[i].first.val - p_mngr->pf_start_line; + rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET + + clients[i].first.val * ILT_ENTRY_IN_REGS; + + for (; line <= clients[i].last.val - p_mngr->pf_start_line; + line++, rt_offst += ILT_ENTRY_IN_REGS) { + u64 ilt_hw_entry = 0; + + /** p_virt could be NULL incase of dynamic + * allocation + */ + if (p_shdw[line].p_virt) { + SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); + SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, + (p_shdw[line].p_phys >> 12)); + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n", + rt_offst, line, i, + (u64)(p_shdw[line].p_phys >> 12)); + } + + STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry); + } + } +} + +void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) +{ + qed_cdu_init_common(p_hwfn); +} + +void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) +{ + qed_qm_init_pf(p_hwfn); + qed_cm_init_pf(p_hwfn); + qed_dq_init_pf(p_hwfn); + qed_ilt_init_pf(p_hwfn); +} + +int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, + enum protocol_type type, + u32 *p_cid) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 rel_cid; + + if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) { + DP_NOTICE(p_hwfn, "Invalid protocol type %d", type); + return -EINVAL; + } + + rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map, + p_mngr->acquired[type].max_count); + + if (rel_cid >= p_mngr->acquired[type].max_count) { + DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", + type); + return -EINVAL; + } + + __set_bit(rel_cid, p_mngr->acquired[type].cid_map); + + *p_cid = rel_cid + p_mngr->acquired[type].start_cid; + + return 0; +} + +static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn, + u32 cid, + enum protocol_type *p_type) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_cid_acquired_map *p_map; + enum protocol_type p; + u32 rel_cid; + + /* Iterate over protocols and find matching cid range */ + for (p = 0; p < MAX_CONN_TYPES; p++) { + p_map = &p_mngr->acquired[p]; + + if (!p_map->cid_map) + continue; + if (cid >= p_map->start_cid && + cid < p_map->start_cid + p_map->max_count) + break; + } + *p_type = p; + + if (p == MAX_CONN_TYPES) { + DP_NOTICE(p_hwfn, "Invalid CID %d", cid); + return false; + } + + rel_cid = cid - p_map->start_cid; + if (!test_bit(rel_cid, p_map->cid_map)) { + DP_NOTICE(p_hwfn, "CID %d not acquired", cid); + return false; + } + return true; +} + +void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, + u32 cid) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + enum protocol_type type; + bool b_acquired; + u32 rel_cid; + + /* Test acquired and find matching per-protocol map */ + b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type); + + if (!b_acquired) + return; + + rel_cid = cid - p_mngr->acquired[type].start_cid; + __clear_bit(rel_cid, p_mngr->acquired[type].cid_map); +} + +int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, + struct qed_cxt_info *p_info) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 conn_cxt_size, hw_p_size, cxts_per_p, line; + enum protocol_type type; + bool b_acquired; + + /* Test acquired and find matching per-protocol map */ + b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type); + + if (!b_acquired) + return -EINVAL; + + /* set the protocl type */ + p_info->type = type; + + /* compute context virtual pointer */ + hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; + + conn_cxt_size = CONN_CXT_SIZE(p_hwfn); + cxts_per_p = 
ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size; + line = p_info->iid / cxts_per_p; + + /* Make sure context is allocated (dynamic allocation) */ + if (!p_mngr->ilt_shadow[line].p_virt) + return -EINVAL; + + p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt + + p_info->iid % cxts_per_p * conn_cxt_size; + + DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT), + "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n", + p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid); + + return 0; +} + +int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) +{ + struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; + + /* Set the number of required CORE connections */ + u32 core_cids = 1; /* SPQ */ + + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids); + + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + p_params->num_cons); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h new file mode 100644 index 000000000000..c8e1f5e5c42b --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -0,0 +1,139 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_CXT_H +#define _QED_CXT_H + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/qed/qed_if.h> +#include "qed_hsi.h" +#include "qed.h" + +struct qed_cxt_info { + void *p_cxt; + u32 iid; + enum protocol_type type; +}; + +/** + * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type + * + * @param p_hwfn + * @param type + * @param p_cid + * + * @return int + */ +int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, + enum protocol_type type, + u32 *p_cid); + +/** + * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid + * + * + * @param p_hwfn + * @param p_info in/out + * + * @return int + */ +int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, + struct qed_cxt_info *p_info); + +enum qed_cxt_elem_type { + QED_ELEM_CXT, + QED_ELEM_TASK +}; + +/** + * @brief qed_cxt_set_pf_params - Set the PF params for cxt init + * + * @param p_hwfn + * + * @return int + */ +int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters + * + * @param p_hwfn + * + * @return int + */ +int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct + * + * @param p_hwfn + * + * @return int + */ +int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_cxt_mngr_free + * + * @param p_hwfn + */ +void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map + * + * @param p_hwfn + * + * @return int + */ +int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_cxt_mngr_setup - Reset the acquired CIDs + * + * @param p_hwfn + */ +void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path. + * + * + * + * @param p_hwfn + */ +void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path. 
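qed_cxt_get_cid_info() above locates a context by splitting the iid into an ILT shadow line and a byte offset inside that page. A small worked example, reusing the illustrative page and context sizes from the CDU sketch earlier:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Mirrors the pointer math in qed_cxt_get_cid_info(). */
	uint32_t page_sz = 32 * 1024;
	uint32_t conn_cxt_size = 320;		/* illustrative, as before   */
	uint32_t cxts_per_p = page_sz / conn_cxt_size;	/* 102 contexts/page */
	uint32_t iid = 250;

	uint32_t line = iid / cxts_per_p;			/* shadow line 2 */
	uint32_t offset = (iid % cxts_per_p) * conn_cxt_size;	/* byte offset   */

	printf("iid %u -> ilt_shadow[%u] + %u bytes\n", iid, line, offset);
	return 0;
}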
+ * + * + * + * @param p_hwfn + */ +void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_qm_init_pf - Initailze the QM PF phase, per path + * + * @param p_hwfn + */ + +void qed_qm_init_pf(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_cxt_release - Release a cid + * + * @param p_hwfn + * @param cid + */ +void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, + u32 cid); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c new file mode 100644 index 000000000000..b9b7b7e6fa53 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -0,0 +1,1797 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/etherdevice.h> +#include <linux/qed/qed_chain.h> +#include <linux/qed/qed_if.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_int.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" + +/* API common to all protocols */ +void qed_init_dp(struct qed_dev *cdev, + u32 dp_module, u8 dp_level) +{ + u32 i; + + cdev->dp_level = dp_level; + cdev->dp_module = dp_module; + for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + p_hwfn->dp_level = dp_level; + p_hwfn->dp_module = dp_module; + } +} + +void qed_init_struct(struct qed_dev *cdev) +{ + u8 i; + + for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + p_hwfn->cdev = cdev; + p_hwfn->my_id = i; + p_hwfn->b_active = false; + + mutex_init(&p_hwfn->dmae_info.mutex); + } + + /* hwfn 0 is always active */ + cdev->hwfns[0].b_active = true; + + /* set the default cache alignment to 128 */ + cdev->cache_shift = 7; +} + +static void qed_qm_info_free(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + kfree(qm_info->qm_pq_params); + qm_info->qm_pq_params = NULL; + kfree(qm_info->qm_vport_params); + qm_info->qm_vport_params = NULL; + kfree(qm_info->qm_port_params); + qm_info->qm_port_params = NULL; +} + +void qed_resc_free(struct qed_dev *cdev) +{ + int i; + + kfree(cdev->fw_data); + cdev->fw_data = NULL; + + kfree(cdev->reset_stats); + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + kfree(p_hwfn->p_tx_cids); + p_hwfn->p_tx_cids = NULL; + kfree(p_hwfn->p_rx_cids); + p_hwfn->p_rx_cids = NULL; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + qed_cxt_mngr_free(p_hwfn); + qed_qm_info_free(p_hwfn); + qed_spq_free(p_hwfn); + qed_eq_free(p_hwfn, p_hwfn->p_eq); + qed_consq_free(p_hwfn, p_hwfn->p_consq); + qed_int_free(p_hwfn); + qed_dmae_info_free(p_hwfn); + } +} + +static int qed_init_qm_info(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + struct init_qm_port_params *p_qm_port; + u8 num_vports, i, vport_id, num_ports; + u16 num_pqs, multi_cos_tcs = 1; + + memset(qm_info, 0, sizeof(*qm_info)); + + num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */ + num_vports 
= (u8)RESC_NUM(p_hwfn, QED_VPORT); + + /* Sanity checking that setup requires legal number of resources */ + if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) { + DP_ERR(p_hwfn, + "Need too many Physical queues - 0x%04x when only %04x are available\n", + num_pqs, RESC_NUM(p_hwfn, QED_PQ)); + return -EINVAL; + } + + /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete. + */ + qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * + num_pqs, GFP_ATOMIC); + if (!qm_info->qm_pq_params) + goto alloc_err; + + qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * + num_vports, GFP_ATOMIC); + if (!qm_info->qm_vport_params) + goto alloc_err; + + qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * + MAX_NUM_PORTS, GFP_ATOMIC); + if (!qm_info->qm_port_params) + goto alloc_err; + + vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); + + /* First init per-TC PQs */ + for (i = 0; i < multi_cos_tcs; i++) { + struct init_qm_pq_params *params = &qm_info->qm_pq_params[i]; + + params->vport_id = vport_id; + params->tc_id = p_hwfn->hw_info.non_offload_tc; + params->wrr_group = 1; + } + + /* Then init pure-LB PQ */ + qm_info->pure_lb_pq = i; + qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); + qm_info->qm_pq_params[i].tc_id = PURE_LB_TC; + qm_info->qm_pq_params[i].wrr_group = 1; + i++; + + qm_info->offload_pq = 0; + qm_info->num_pqs = num_pqs; + qm_info->num_vports = num_vports; + + /* Initialize qm port parameters */ + num_ports = p_hwfn->cdev->num_ports_in_engines; + for (i = 0; i < num_ports; i++) { + p_qm_port = &qm_info->qm_port_params[i]; + p_qm_port->active = 1; + p_qm_port->num_active_phys_tcs = 4; + p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; + p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; + } + + qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS; + + qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); + + qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); + + qm_info->pf_wfq = 0; + qm_info->pf_rl = 0; + qm_info->vport_rl_en = 1; + + return 0; + +alloc_err: + DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); + kfree(qm_info->qm_pq_params); + kfree(qm_info->qm_vport_params); + kfree(qm_info->qm_port_params); + + return -ENOMEM; +} + +int qed_resc_alloc(struct qed_dev *cdev) +{ + struct qed_consq *p_consq; + struct qed_eq *p_eq; + int i, rc = 0; + + cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); + if (!cdev->fw_data) + return -ENOMEM; + + /* Allocate Memory for the Queue->CID mapping */ + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + int tx_size = sizeof(struct qed_hw_cid_data) * + RESC_NUM(p_hwfn, QED_L2_QUEUE); + int rx_size = sizeof(struct qed_hw_cid_data) * + RESC_NUM(p_hwfn, QED_L2_QUEUE); + + p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL); + if (!p_hwfn->p_tx_cids) { + DP_NOTICE(p_hwfn, + "Failed to allocate memory for Tx Cids\n"); + goto alloc_err; + } + + p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL); + if (!p_hwfn->p_rx_cids) { + DP_NOTICE(p_hwfn, + "Failed to allocate memory for Rx Cids\n"); + goto alloc_err; + } + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + /* First allocate the context manager structure */ + rc = qed_cxt_mngr_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* Set the HW cid/tid numbers (in the contest manager) + * Must be done prior to any further computations. 
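qed_init_qm_info() above lays out the physical queues as one PQ per traffic class followed by a single pure loopback PQ, so with multi_cos_tcs = 1 the pure-LB PQ is simply the last index. A trivial sketch of that layout:

#include <stdio.h>

int main(void)
{
	/* Mirrors the PQ ordering in qed_init_qm_info(): per-TC PQs first,
	 * then the pure loopback PQ as the final entry.
	 */
	unsigned int multi_cos_tcs = 1;
	unsigned int num_pqs = multi_cos_tcs + 1;	/* the '+1' is pure-LB  */
	unsigned int pure_lb_pq = multi_cos_tcs;	/* index after TC PQs   */

	printf("num_pqs=%u pure_lb_pq index=%u\n", num_pqs, pure_lb_pq);
	return 0;
}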
+ */ + rc = qed_cxt_set_pf_params(p_hwfn); + if (rc) + goto alloc_err; + + /* Prepare and process QM requirements */ + rc = qed_init_qm_info(p_hwfn); + if (rc) + goto alloc_err; + + /* Compute the ILT client partition */ + rc = qed_cxt_cfg_ilt_compute(p_hwfn); + if (rc) + goto alloc_err; + + /* CID map / ILT shadow table / T2 + * The talbes sizes are determined by the computations above + */ + rc = qed_cxt_tables_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* SPQ, must follow ILT because initializes SPQ context */ + rc = qed_spq_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* SP status block allocation */ + p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn, + RESERVED_PTT_DPC); + + rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt); + if (rc) + goto alloc_err; + + /* EQ */ + p_eq = qed_eq_alloc(p_hwfn, 256); + + if (!p_eq) + goto alloc_err; + p_hwfn->p_eq = p_eq; + + p_consq = qed_consq_alloc(p_hwfn); + if (!p_consq) + goto alloc_err; + p_hwfn->p_consq = p_consq; + + /* DMA info initialization */ + rc = qed_dmae_info_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to allocate memory for dmae_info structure\n"); + goto alloc_err; + } + } + + cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); + if (!cdev->reset_stats) { + DP_NOTICE(cdev, "Failed to allocate reset statistics\n"); + goto alloc_err; + } + + return 0; + +alloc_err: + qed_resc_free(cdev); + return rc; +} + +void qed_resc_setup(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + qed_cxt_mngr_setup(p_hwfn); + qed_spq_setup(p_hwfn); + qed_eq_setup(p_hwfn, p_hwfn->p_eq); + qed_consq_setup(p_hwfn, p_hwfn->p_consq); + + /* Read shadow of current MFW mailbox */ + qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); + memcpy(p_hwfn->mcp_info->mfw_mb_shadow, + p_hwfn->mcp_info->mfw_mb_cur, + p_hwfn->mcp_info->mfw_mb_length); + + qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); + } +} + +#define FINAL_CLEANUP_CMD_OFFSET (0) +#define FINAL_CLEANUP_CMD (0x1) +#define FINAL_CLEANUP_VALID_OFFSET (6) +#define FINAL_CLEANUP_VFPF_ID_SHIFT (7) +#define FINAL_CLEANUP_COMP (0x2) +#define FINAL_CLEANUP_POLL_CNT (100) +#define FINAL_CLEANUP_POLL_TIME (10) +int qed_final_cleanup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 id) +{ + u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; + int rc = -EBUSY; + + addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET; + + command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET; + command |= 1 << FINAL_CLEANUP_VALID_OFFSET; + command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT; + command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT; + + /* Make sure notification is not set before initiating final cleanup */ + if (REG_RD(p_hwfn, addr)) { + DP_NOTICE( + p_hwfn, + "Unexpected; Found final cleanup notification before initiating final cleanup\n"); + REG_WR(p_hwfn, addr, 0); + } + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Sending final cleanup for PFVF[%d] [Command %08x\n]", + id, command); + + qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); + + /* Poll until completion */ + while (!REG_RD(p_hwfn, addr) && count--) + msleep(FINAL_CLEANUP_POLL_TIME); + + if (REG_RD(p_hwfn, addr)) + rc = 0; + else + DP_NOTICE(p_hwfn, + "Failed to receive FW final cleanup notification\n"); + + /* Cleanup afterwards */ + REG_WR(p_hwfn, addr, 0); + + return rc; +} + +static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) +{ + int hw_mode = 0; + + hw_mode = (1 << MODE_BB_A0); + + switch (p_hwfn->cdev->num_ports_in_engines) { + case 1: + hw_mode 
|= 1 << MODE_PORTS_PER_ENG_1; + break; + case 2: + hw_mode |= 1 << MODE_PORTS_PER_ENG_2; + break; + case 4: + hw_mode |= 1 << MODE_PORTS_PER_ENG_4; + break; + default: + DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", + p_hwfn->cdev->num_ports_in_engines); + return; + } + + switch (p_hwfn->cdev->mf_mode) { + case SF: + hw_mode |= 1 << MODE_SF; + break; + case MF_OVLAN: + hw_mode |= 1 << MODE_MF_SD; + break; + case MF_NPAR: + hw_mode |= 1 << MODE_MF_SI; + break; + default: + DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n"); + hw_mode |= 1 << MODE_SF; + } + + hw_mode |= 1 << MODE_ASIC; + + p_hwfn->hw_info.hw_mode = hw_mode; +} + +/* Init run time data for all PFs on an engine. */ +static void qed_init_cau_rt_data(struct qed_dev *cdev) +{ + u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; + int i, sb_id; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_igu_info *p_igu_info; + struct qed_igu_block *p_block; + struct cau_sb_entry sb_entry; + + p_igu_info = p_hwfn->hw_info.p_igu_info; + + for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev); + sb_id++) { + p_block = &p_igu_info->igu_map.igu_blocks[sb_id]; + if (!p_block->is_pf) + continue; + + qed_init_cau_sb_entry(p_hwfn, &sb_entry, + p_block->function_id, + 0, 0); + STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, + sb_entry); + } + } +} + +static int qed_hw_init_common(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + int hw_mode) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + struct qed_qm_common_rt_init_params params; + struct qed_dev *cdev = p_hwfn->cdev; + int rc = 0; + + qed_init_cau_rt_data(cdev); + + /* Program GTT windows */ + qed_gtt_init(p_hwfn); + + if (p_hwfn->mcp_info) { + if (p_hwfn->mcp_info->func_info.bandwidth_max) + qm_info->pf_rl_en = 1; + if (p_hwfn->mcp_info->func_info.bandwidth_min) + qm_info->pf_wfq_en = 1; + } + + memset(¶ms, 0, sizeof(params)); + params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines; + params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; + params.pf_rl_en = qm_info->pf_rl_en; + params.pf_wfq_en = qm_info->pf_wfq_en; + params.vport_rl_en = qm_info->vport_rl_en; + params.vport_wfq_en = qm_info->vport_wfq_en; + params.port_params = qm_info->qm_port_params; + + qed_qm_common_rt_init(p_hwfn, ¶ms); + + qed_cxt_hw_init_common(p_hwfn); + + /* Close gate from NIG to BRB/Storm; By default they are open, but + * we close them to prevent NIG from passing data to reset blocks. + * Should have been done in the ENGINE phase, but init-tool lacks + * proper port-pretend capabilities. 
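qed_calc_hw_mode() above builds hw_mode as an OR of single-bit MODE_* flags that the later init phases test individually. A stand-alone sketch with illustrative bit indices (the real MODE_* values come from the generated init headers, not this patch):

#include <stdio.h>

/* Illustrative mode-bit indices only. */
enum { EX_MODE_BB_A0, EX_MODE_PORTS_PER_ENG_2, EX_MODE_MF_SD, EX_MODE_ASIC };

int main(void)
{
	/* hw_mode is an OR of single-bit flags, tested with (1 << MODE_x). */
	int hw_mode = (1 << EX_MODE_BB_A0) |
		      (1 << EX_MODE_PORTS_PER_ENG_2) |
		      (1 << EX_MODE_ASIC);

	if (hw_mode & (1 << EX_MODE_MF_SD))
		printf("multi-function (ovlan) mode\n");
	else
		printf("single-function style mode\n");
	return 0;
}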
+ */ + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0); + qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1); + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0); + qed_port_unpretend(p_hwfn, p_ptt); + + rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); + if (rc != 0) + return rc; + + qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); + + /* Disable relaxed ordering in the PCI config space */ + qed_wr(p_hwfn, p_ptt, 0x20b4, + qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10); + + return rc; +} + +static int qed_hw_init_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + int hw_mode) +{ + int rc = 0; + + rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, + hw_mode); + return rc; +} + +static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + int hw_mode, + bool b_hw_start, + enum qed_int_mode int_mode, + bool allow_npar_tx_switch) +{ + u8 rel_pf_id = p_hwfn->rel_pf_id; + int rc = 0; + + if (p_hwfn->mcp_info) { + struct qed_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + if (p_info->bandwidth_min) + p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; + + /* Update rate limit once we'll actually have a link */ + p_hwfn->qm_info.pf_rl = 100; + } + + qed_cxt_hw_init_pf(p_hwfn); + + qed_int_igu_init_rt(p_hwfn); + + /* Set VLAN in NIG if needed */ + if (hw_mode & (1 << MODE_MF_SD)) { + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, + p_hwfn->hw_info.ovlan); + } + + /* Enable classification by MAC if needed */ + if (hw_mode & MODE_MF_SI) { + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "Configuring TAGMAC_CLS_TYPE\n"); + STORE_RT_REG(p_hwfn, + NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); + } + + /* Protocl Configuration */ + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0); + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0); + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); + + /* Cleanup chip from previous driver if such remains exist */ + rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id); + if (rc != 0) + return rc; + + /* PF Init sequence */ + rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); + if (rc) + return rc; + + /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ + rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); + if (rc) + return rc; + + /* Pure runtime initializations - directly to the HW */ + qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); + + if (b_hw_start) { + /* enable interrupts */ + qed_int_igu_enable(p_hwfn, p_ptt, int_mode); + + /* send function start command */ + rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode); + if (rc) + DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); + } + return rc; +} + +static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 enable) +{ + u32 delay_idx = 0, val, set_val = enable ? 
1 : 0; + + /* Change PF in PXP */ + qed_wr(p_hwfn, p_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); + + /* wait until value is set - try for 1 second every 50us */ + for (delay_idx = 0; delay_idx < 20000; delay_idx++) { + val = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); + if (val == set_val) + break; + + usleep_range(50, 60); + } + + if (val != set_val) { + DP_NOTICE(p_hwfn, + "PFID_ENABLE_MASTER wasn't changed after a second\n"); + return -EAGAIN; + } + + return 0; +} + +static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_main_ptt) +{ + /* Read shadow of current MFW mailbox */ + qed_mcp_read_mb(p_hwfn, p_main_ptt); + memcpy(p_hwfn->mcp_info->mfw_mb_shadow, + p_hwfn->mcp_info->mfw_mb_cur, + p_hwfn->mcp_info->mfw_mb_length); +} + +int qed_hw_init(struct qed_dev *cdev, + bool b_hw_start, + enum qed_int_mode int_mode, + bool allow_npar_tx_switch, + const u8 *bin_fw_data) +{ + struct qed_storm_stats *p_stat; + u32 load_code, param, *p_address; + int rc, mfw_rc, i; + u8 fw_vport = 0; + + rc = qed_init_fw_data(cdev, bin_fw_data); + if (rc != 0) + return rc; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + rc = qed_fw_vport(p_hwfn, 0, &fw_vport); + if (rc != 0) + return rc; + + /* Enable DMAE in PXP */ + rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); + + qed_calc_hw_mode(p_hwfn); + + rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, + &load_code); + if (rc) { + DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n"); + return rc; + } + + qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Load request was sent. Resp:0x%x, Load code: 0x%x\n", + rc, load_code); + + p_hwfn->first_on_engine = (load_code == + FW_MSG_CODE_DRV_LOAD_ENGINE); + + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_ENGINE: + rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.hw_mode); + if (rc) + break; + /* Fall into */ + case FW_MSG_CODE_DRV_LOAD_PORT: + rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.hw_mode); + if (rc) + break; + + /* Fall into */ + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.hw_mode, + b_hw_start, int_mode, + allow_npar_tx_switch); + break; + default: + rc = -EINVAL; + break; + } + + if (rc) + DP_NOTICE(p_hwfn, + "init phase failed for loadcode 0x%x (rc %d)\n", + load_code, rc); + + /* ACK mfw regardless of success or failure of initialization */ + mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_LOAD_DONE, + 0, &load_code, ¶m); + if (rc) + return rc; + if (mfw_rc) { + DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n"); + return mfw_rc; + } + + p_hwfn->hw_init_done = true; + + /* init PF stats */ + p_stat = &p_hwfn->storm_stats; + p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM + + MSTORM_QUEUE_STAT_OFFSET(fw_vport); + p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); + + p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM + + USTORM_QUEUE_STAT_OFFSET(fw_vport); + p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); + + p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM + + PSTORM_QUEUE_STAT_OFFSET(fw_vport); + p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); + + p_address = &p_stat->tstats.address; + *p_address = BAR0_MAP_REG_TSDM_RAM + + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); + p_stat->tstats.len = sizeof(struct tstorm_per_port_stat); + } + + return 0; +} + +#define QED_HW_STOP_RETRY_LIMIT (10) +int qed_hw_stop(struct 
qed_dev *cdev) +{ + int rc = 0, t_rc; + int i, j; + + for_each_hwfn(cdev, j) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); + + /* mark the hw as uninitialized... */ + p_hwfn->hw_init_done = false; + + rc = qed_sp_pf_stop(p_hwfn); + if (rc) + return rc; + + qed_wr(p_hwfn, p_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); + + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); + + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); + for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { + if ((!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN)) && + (!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_TASK))) + break; + + usleep_range(1000, 2000); + } + if (i == QED_HW_STOP_RETRY_LIMIT) + DP_NOTICE(p_hwfn, + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", + (u8)qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN), + (u8)qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_TASK)); + + /* Disable Attention Generation */ + qed_int_igu_disable_int(p_hwfn, p_ptt); + + qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); + qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); + + qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); + + /* Need to wait 1ms to guarantee SBs are cleared */ + usleep_range(1000, 2000); + } + + /* Disable DMAE in PXP - in CMT, this should only be done for + * first hw-function, and only after all transactions have + * stopped for all active hw-functions. + */ + t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], + cdev->hwfns[0].p_main_ptt, + false); + if (t_rc != 0) + rc = t_rc; + + return rc; +} + +void qed_hw_stop_fastpath(struct qed_dev *cdev) +{ + int i, j; + + for_each_hwfn(cdev, j) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + + DP_VERBOSE(p_hwfn, + NETIF_MSG_IFDOWN, + "Shutting down the fastpath\n"); + + qed_wr(p_hwfn, p_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); + + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); + + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); + for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { + if ((!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN)) && + (!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_TASK))) + break; + + usleep_range(1000, 2000); + } + if (i == QED_HW_STOP_RETRY_LIMIT) + DP_NOTICE(p_hwfn, + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", + (u8)qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN), + (u8)qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_TASK)); + + qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); + + /* Need to wait 1ms to guarantee SBs are cleared */ + usleep_range(1000, 2000); + } +} + +void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) +{ + /* Re-open incoming traffic */ + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); +} + +static int qed_reg_assert(struct qed_hwfn *hwfn, + struct qed_ptt *ptt, u32 reg, + bool expected) +{ + u32 assert_val = qed_rd(hwfn, ptt, reg); + + if 
(assert_val != expected) { + DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n", + reg, expected); + return -EINVAL; + } + + return 0; +} + +int qed_hw_reset(struct qed_dev *cdev) +{ + int rc = 0; + u32 unload_resp, unload_param; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n"); + + /* Check for incorrect states */ + qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt, + QM_REG_USG_CNT_PF_TX, 0); + qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt, + QM_REG_USG_CNT_PF_OTHER, 0); + + /* Disable PF in HW blocks */ + qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + TCFC_REG_STRONG_ENABLE_PF, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + CCFC_REG_STRONG_ENABLE_PF, 0); + + /* Send unload command to MCP */ + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_UNLOAD_REQ, + DRV_MB_PARAM_UNLOAD_WOL_MCP, + &unload_resp, &unload_param); + if (rc) { + DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n"); + unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE; + } + + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_UNLOAD_DONE, + 0, &unload_resp, &unload_param); + if (rc) { + DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n"); + return rc; + } + } + + return rc; +} + +/* Free hwfn memory and resources acquired in hw_hwfn_prepare */ +static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) +{ + qed_ptt_pool_free(p_hwfn); + kfree(p_hwfn->hw_info.p_igu_info); +} + +/* Setup bar access */ +static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) +{ + int rc; + + /* Allocate PTT pool */ + rc = qed_ptt_pool_alloc(p_hwfn); + if (rc) + return rc; + + /* Allocate the main PTT */ + p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); + + /* clear indirect access */ + qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0); + + /* Clean Previous errors if such exist */ + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, + 1 << p_hwfn->abs_pf_id); + + /* enable internal target-read */ + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); + + return 0; +} + +static void get_function_id(struct qed_hwfn *p_hwfn) +{ + /* ME Register */ + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR); + + p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); + + p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; + p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PFID); + p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PORT); +} + +static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) +{ + u32 *feat_num = p_hwfn->hw_info.feat_num; + int num_features = 1; + + feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / + num_features, + RESC_NUM(p_hwfn, QED_L2_QUEUE)); + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, + "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n", + feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB), + num_features); +} + +static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) +{ + u32 *resc_start = p_hwfn->hw_info.resc_start; + u32 *resc_num = p_hwfn->hw_info.resc_num; + int num_funcs, i; + + num_funcs = IS_MF(p_hwfn) ? 
MAX_NUM_PFS_BB + : p_hwfn->cdev->num_ports_in_engines; + + resc_num[QED_SB] = min_t(u32, + (MAX_SB_PER_PATH_BB / num_funcs), + qed_int_get_num_sbs(p_hwfn, NULL)); + resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs; + resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs; + resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs; + resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs; + resc_num[QED_RL] = 8; + resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs; + resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) / + num_funcs; + resc_num[QED_ILT] = 950; + + for (i = 0; i < QED_MAX_RESC; i++) + resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id; + + qed_hw_set_feat(p_hwfn); + + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, + "The numbers for each resource are:\n" + "SB = %d start = %d\n" + "L2_QUEUE = %d start = %d\n" + "VPORT = %d start = %d\n" + "PQ = %d start = %d\n" + "RL = %d start = %d\n" + "MAC = %d start = %d\n" + "VLAN = %d start = %d\n" + "ILT = %d start = %d\n", + p_hwfn->hw_info.resc_num[QED_SB], + p_hwfn->hw_info.resc_start[QED_SB], + p_hwfn->hw_info.resc_num[QED_L2_QUEUE], + p_hwfn->hw_info.resc_start[QED_L2_QUEUE], + p_hwfn->hw_info.resc_num[QED_VPORT], + p_hwfn->hw_info.resc_start[QED_VPORT], + p_hwfn->hw_info.resc_num[QED_PQ], + p_hwfn->hw_info.resc_start[QED_PQ], + p_hwfn->hw_info.resc_num[QED_RL], + p_hwfn->hw_info.resc_start[QED_RL], + p_hwfn->hw_info.resc_num[QED_MAC], + p_hwfn->hw_info.resc_start[QED_MAC], + p_hwfn->hw_info.resc_num[QED_VLAN], + p_hwfn->hw_info.resc_start[QED_VLAN], + p_hwfn->hw_info.resc_num[QED_ILT], + p_hwfn->hw_info.resc_start[QED_ILT]); +} + +static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + u32 port_cfg_addr, link_temp, val, nvm_cfg_addr; + struct qed_mcp_link_params *link; + + /* Read global nvm_cfg address */ + nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + + /* Verify MCP has initialized it */ + if (!nvm_cfg_addr) { + DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); + return -EINVAL; + } + + /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ + nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + + /* Read Vendor Id / Device Id */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, pci_id); + p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) & + NVM_CFG1_GLOB_VENDOR_ID_MASK; + + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, core_cfg); + + core_cfg = qed_rd(p_hwfn, p_ptt, addr); + + switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> + NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; + 
break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; + break; + default: + DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", + core_cfg); + break; + } + + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) + + offsetof(struct nvm_cfg1_func, device_id); + val = qed_rd(p_hwfn, p_ptt, addr); + + if (IS_MF(p_hwfn)) { + p_hwfn->hw_info.device_id = + (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >> + NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET; + } else { + p_hwfn->hw_info.device_id = + (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >> + NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET; + } + + /* Read default link configuration */ + link = &p_hwfn->mcp_info->link_input; + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, speed_cap_mask)); + link->speed.advertised_speeds = + link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; + + p_hwfn->mcp_info->link_capabilities.speed_capabilities = + link->speed.advertised_speeds; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, link_settings)); + switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> + NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { + case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: + link->speed.autoneg = true; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: + link->speed.forced_speed = 1000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: + link->speed.forced_speed = 10000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: + link->speed.forced_speed = 25000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: + link->speed.forced_speed = 40000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: + link->speed.forced_speed = 50000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_100G: + link->speed.forced_speed = 100000; + break; + default: + DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", + link_temp); + } + + link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; + link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; + link->pause.autoneg = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); + link->pause.forced_rx = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); + link->pause.forced_tx = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); + link->loopback_mode = 0; + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. 
Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n", + link->speed.forced_speed, link->speed.advertised_speeds, + link->speed.autoneg, link->pause.autoneg); + + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, generic_cont0); + + generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); + + mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> + NVM_CFG1_GLOB_MF_MODE_OFFSET; + + switch (mf_mode) { + case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + p_hwfn->cdev->mf_mode = MF_OVLAN; + break; + case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: + p_hwfn->cdev->mf_mode = MF_NPAR; + break; + case NVM_CFG1_GLOB_MF_MODE_FORCED_SF: + p_hwfn->cdev->mf_mode = SF; + break; + } + DP_INFO(p_hwfn, "Multi function mode is %08x\n", + p_hwfn->cdev->mf_mode); + + return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); +} + +static int +qed_get_hw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_pci_personality personality) +{ + u32 port_mode; + int rc; + + /* Read the port mode */ + port_mode = qed_rd(p_hwfn, p_ptt, + CNIG_REG_NW_PORT_MODE_BB_B0); + + if (port_mode < 3) { + p_hwfn->cdev->num_ports_in_engines = 1; + } else if (port_mode <= 5) { + p_hwfn->cdev->num_ports_in_engines = 2; + } else { + DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", + p_hwfn->cdev->num_ports_in_engines); + + /* Default num_ports_in_engines to something */ + p_hwfn->cdev->num_ports_in_engines = 1; + } + + qed_hw_get_nvm_info(p_hwfn, p_ptt); + + rc = qed_int_igu_read_cam(p_hwfn, p_ptt); + if (rc) + return rc; + + if (qed_mcp_is_init(p_hwfn)) + ether_addr_copy(p_hwfn->hw_info.hw_mac_addr, + p_hwfn->mcp_info->func_info.mac); + else + eth_random_addr(p_hwfn->hw_info.hw_mac_addr); + + if (qed_mcp_is_init(p_hwfn)) { + if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET) + p_hwfn->hw_info.ovlan = + p_hwfn->mcp_info->func_info.ovlan; + + qed_mcp_cmd_port_init(p_hwfn, p_ptt); + } + + if (qed_mcp_is_init(p_hwfn)) { + enum qed_pci_personality protocol; + + protocol = p_hwfn->mcp_info->func_info.protocol; + p_hwfn->hw_info.personality = protocol; + } + + qed_hw_get_resc(p_hwfn); + + return rc; +} + +static void qed_get_dev_info(struct qed_dev *cdev) +{ + u32 tmp; + + cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + MISCS_REG_CHIP_NUM); + cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + MISCS_REG_CHIP_REV); + MASK_FIELD(CHIP_REV, cdev->chip_rev); + + /* Learn number of HW-functions */ + tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + MISCS_REG_CMT_ENABLED_FOR_PAIR); + + if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) { + DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); + cdev->num_hwfns = 2; + } else { + cdev->num_hwfns = 1; + } + + cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + MISCS_REG_CHIP_TEST_REG) >> 4; + MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); + cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + MISCS_REG_CHIP_METAL); + MASK_FIELD(CHIP_METAL, cdev->chip_metal); + + DP_INFO(cdev->hwfns, + "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", + cdev->chip_num, cdev->chip_rev, + cdev->chip_bond_id, cdev->chip_metal); +} + +static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, + void __iomem *p_regview, + void __iomem *p_doorbells, + enum qed_pci_personality personality) +{ + int rc = 0; + + /* Split PCI bars evenly between hwfns */ + p_hwfn->regview = p_regview; + p_hwfn->doorbells = p_doorbells; + + /* Validate that chip access 
is feasible */ + if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { + DP_ERR(p_hwfn, + "Reading the ME register returns all Fs; Preventing further chip access\n"); + return -EINVAL; + } + + get_function_id(p_hwfn); + + rc = qed_hw_hwfn_prepare(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n"); + goto err0; + } + + /* First hwfn learns basic information, e.g., number of hwfns */ + if (!p_hwfn->my_id) + qed_get_dev_info(p_hwfn->cdev); + + /* Initialize MCP structure */ + rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); + if (rc) { + DP_NOTICE(p_hwfn, "Failed initializing mcp command\n"); + goto err1; + } + + /* Read the device configuration information from the HW and SHMEM */ + rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to get HW information\n"); + goto err2; + } + + /* Allocate the init RT array and initialize the init-ops engine */ + rc = qed_init_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to allocate the init array\n"); + goto err2; + } + + return rc; +err2: + qed_mcp_free(p_hwfn); +err1: + qed_hw_hwfn_free(p_hwfn); +err0: + return rc; +} + +static u32 qed_hw_bar_size(struct qed_dev *cdev, + u8 bar_id) +{ + u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0); + + return size / cdev->num_hwfns; +} + +int qed_hw_prepare(struct qed_dev *cdev, + int personality) +{ + int rc, i; + + /* Store the precompiled init data ptrs */ + qed_init_iro_array(cdev); + + /* Initialize the first hwfn - will learn number of hwfns */ + rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview, + cdev->doorbells, personality); + if (rc) + return rc; + + personality = cdev->hwfns[0].hw_info.personality; + + /* Initialize the rest of the hwfns */ + for (i = 1; i < cdev->num_hwfns; i++) { + void __iomem *p_regview, *p_doorbell; + + p_regview = cdev->regview + + i * qed_hw_bar_size(cdev, 0); + p_doorbell = cdev->doorbells + + i * qed_hw_bar_size(cdev, 1); + rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview, + p_doorbell, personality); + if (rc) { + /* Cleanup previously initialized hwfns */ + while (--i >= 0) { + qed_init_free(&cdev->hwfns[i]); + qed_mcp_free(&cdev->hwfns[i]); + qed_hw_hwfn_free(&cdev->hwfns[i]); + } + return rc; + } + } + + return 0; +} + +void qed_hw_remove(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + qed_init_free(p_hwfn); + qed_hw_hwfn_free(p_hwfn); + qed_mcp_free(p_hwfn); + } +} + +int qed_chain_alloc(struct qed_dev *cdev, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + u16 num_elems, + size_t elem_size, + struct qed_chain *p_chain) +{ + dma_addr_t p_pbl_phys = 0; + void *p_pbl_virt = NULL; + dma_addr_t p_phys = 0; + void *p_virt = NULL; + u16 page_cnt = 0; + size_t size; + + if (mode == QED_CHAIN_MODE_SINGLE) + page_cnt = 1; + else + page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); + + size = page_cnt * QED_CHAIN_PAGE_SIZE; + p_virt = dma_alloc_coherent(&cdev->pdev->dev, + size, &p_phys, GFP_KERNEL); + if (!p_virt) { + DP_NOTICE(cdev, "Failed to allocate chain mem\n"); + goto nomem; + } + + if (mode == QED_CHAIN_MODE_PBL) { + size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; + p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, + size, &p_pbl_phys, + GFP_KERNEL); + if (!p_pbl_virt) { + DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n"); + goto nomem; + } + + qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt, + (u8)elem_size, intended_use, + p_pbl_phys, p_pbl_virt); + } else { + 
qed_chain_init(p_chain, p_virt, p_phys, page_cnt, + (u8)elem_size, intended_use, mode); + } + + return 0; + +nomem: + dma_free_coherent(&cdev->pdev->dev, + page_cnt * QED_CHAIN_PAGE_SIZE, + p_virt, p_phys); + dma_free_coherent(&cdev->pdev->dev, + page_cnt * QED_CHAIN_PBL_ENTRY_SIZE, + p_pbl_virt, p_pbl_phys); + + return -ENOMEM; +} + +void qed_chain_free(struct qed_dev *cdev, + struct qed_chain *p_chain) +{ + size_t size; + + if (!p_chain->p_virt_addr) + return; + + if (p_chain->mode == QED_CHAIN_MODE_PBL) { + size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; + dma_free_coherent(&cdev->pdev->dev, size, + p_chain->pbl.p_virt_table, + p_chain->pbl.p_phys_table); + } + + size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE; + dma_free_coherent(&cdev->pdev->dev, size, + p_chain->p_virt_addr, + p_chain->p_phys_addr); +} + +static void __qed_get_vport_stats(struct qed_dev *cdev, + struct qed_eth_stats *stats) +{ + int i, j; + + memset(stats, 0, sizeof(*stats)); + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct eth_mstorm_per_queue_stat mstats; + struct eth_ustorm_per_queue_stat ustats; + struct eth_pstorm_per_queue_stat pstats; + struct tstorm_per_port_stat tstats; + struct port_stats port_stats; + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + memset(&mstats, 0, sizeof(mstats)); + qed_memcpy_from(p_hwfn, p_ptt, &mstats, + p_hwfn->storm_stats.mstats.address, + p_hwfn->storm_stats.mstats.len); + + memset(&ustats, 0, sizeof(ustats)); + qed_memcpy_from(p_hwfn, p_ptt, &ustats, + p_hwfn->storm_stats.ustats.address, + p_hwfn->storm_stats.ustats.len); + + memset(&pstats, 0, sizeof(pstats)); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, + p_hwfn->storm_stats.pstats.address, + p_hwfn->storm_stats.pstats.len); + + memset(&tstats, 0, sizeof(tstats)); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, + p_hwfn->storm_stats.tstats.address, + p_hwfn->storm_stats.tstats.len); + + memset(&port_stats, 0, sizeof(port_stats)); + + if (p_hwfn->mcp_info) + qed_memcpy_from(p_hwfn, p_ptt, &port_stats, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, stats), + sizeof(port_stats)); + qed_ptt_release(p_hwfn, p_ptt); + + stats->no_buff_discards += + HILO_64_REGPAIR(mstats.no_buff_discard); + stats->packet_too_big_discard += + HILO_64_REGPAIR(mstats.packet_too_big_discard); + stats->ttl0_discard += + HILO_64_REGPAIR(mstats.ttl0_discard); + stats->tpa_coalesced_pkts += + HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); + stats->tpa_coalesced_events += + HILO_64_REGPAIR(mstats.tpa_coalesced_events); + stats->tpa_aborts_num += + HILO_64_REGPAIR(mstats.tpa_aborts_num); + stats->tpa_coalesced_bytes += + HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); + + stats->rx_ucast_bytes += + HILO_64_REGPAIR(ustats.rcv_ucast_bytes); + stats->rx_mcast_bytes += + HILO_64_REGPAIR(ustats.rcv_mcast_bytes); + stats->rx_bcast_bytes += + HILO_64_REGPAIR(ustats.rcv_bcast_bytes); + stats->rx_ucast_pkts += + HILO_64_REGPAIR(ustats.rcv_ucast_pkts); + stats->rx_mcast_pkts += + HILO_64_REGPAIR(ustats.rcv_mcast_pkts); + stats->rx_bcast_pkts += + HILO_64_REGPAIR(ustats.rcv_bcast_pkts); + + stats->mftag_filter_discards += + HILO_64_REGPAIR(tstats.mftag_filter_discard); + stats->mac_filter_discards += + HILO_64_REGPAIR(tstats.eth_mac_filter_discard); + + stats->tx_ucast_bytes += + HILO_64_REGPAIR(pstats.sent_ucast_bytes); + stats->tx_mcast_bytes += + HILO_64_REGPAIR(pstats.sent_mcast_bytes); + stats->tx_bcast_bytes += + 
HILO_64_REGPAIR(pstats.sent_bcast_bytes); + stats->tx_ucast_pkts += + HILO_64_REGPAIR(pstats.sent_ucast_pkts); + stats->tx_mcast_pkts += + HILO_64_REGPAIR(pstats.sent_mcast_pkts); + stats->tx_bcast_pkts += + HILO_64_REGPAIR(pstats.sent_bcast_pkts); + stats->tx_err_drop_pkts += + HILO_64_REGPAIR(pstats.error_drop_pkts); + stats->rx_64_byte_packets += port_stats.pmm.r64; + stats->rx_127_byte_packets += port_stats.pmm.r127; + stats->rx_255_byte_packets += port_stats.pmm.r255; + stats->rx_511_byte_packets += port_stats.pmm.r511; + stats->rx_1023_byte_packets += port_stats.pmm.r1023; + stats->rx_1518_byte_packets += port_stats.pmm.r1518; + stats->rx_1522_byte_packets += port_stats.pmm.r1522; + stats->rx_2047_byte_packets += port_stats.pmm.r2047; + stats->rx_4095_byte_packets += port_stats.pmm.r4095; + stats->rx_9216_byte_packets += port_stats.pmm.r9216; + stats->rx_16383_byte_packets += port_stats.pmm.r16383; + stats->rx_crc_errors += port_stats.pmm.rfcs; + stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; + stats->rx_pause_frames += port_stats.pmm.rxpf; + stats->rx_pfc_frames += port_stats.pmm.rxpp; + stats->rx_align_errors += port_stats.pmm.raln; + stats->rx_carrier_errors += port_stats.pmm.rfcr; + stats->rx_oversize_packets += port_stats.pmm.rovr; + stats->rx_jabbers += port_stats.pmm.rjbr; + stats->rx_undersize_packets += port_stats.pmm.rund; + stats->rx_fragments += port_stats.pmm.rfrg; + stats->tx_64_byte_packets += port_stats.pmm.t64; + stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; + stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; + stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; + stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; + stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; + stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; + stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; + stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; + stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; + stats->tx_pause_frames += port_stats.pmm.txpf; + stats->tx_pfc_frames += port_stats.pmm.txpp; + stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; + stats->tx_total_collisions += port_stats.pmm.tncl; + stats->rx_mac_bytes += port_stats.pmm.rbyte; + stats->rx_mac_uc_packets += port_stats.pmm.rxuca; + stats->rx_mac_mc_packets += port_stats.pmm.rxmca; + stats->rx_mac_bc_packets += port_stats.pmm.rxbca; + stats->rx_mac_frames_ok += port_stats.pmm.rxpok; + stats->tx_mac_bytes += port_stats.pmm.tbyte; + stats->tx_mac_uc_packets += port_stats.pmm.txuca; + stats->tx_mac_mc_packets += port_stats.pmm.txmca; + stats->tx_mac_bc_packets += port_stats.pmm.txbca; + stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; + + for (j = 0; j < 8; j++) { + stats->brb_truncates += port_stats.brb.brb_truncate[j]; + stats->brb_discards += port_stats.brb.brb_discard[j]; + } + } +} + +void qed_get_vport_stats(struct qed_dev *cdev, + struct qed_eth_stats *stats) +{ + u32 i; + + if (!cdev) { + memset(stats, 0, sizeof(*stats)); + return; + } + + __qed_get_vport_stats(cdev, stats); + + if (!cdev->reset_stats) + return; + + /* Reduce the statistics baseline */ + for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) + ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; +} + +/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ +void qed_reset_vport_stats(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct eth_mstorm_per_queue_stat mstats; + 
struct eth_ustorm_per_queue_stat ustats; + struct eth_pstorm_per_queue_stat pstats; + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + memset(&mstats, 0, sizeof(mstats)); + qed_memcpy_to(p_hwfn, p_ptt, + p_hwfn->storm_stats.mstats.address, + &mstats, + p_hwfn->storm_stats.mstats.len); + + memset(&ustats, 0, sizeof(ustats)); + qed_memcpy_to(p_hwfn, p_ptt, + p_hwfn->storm_stats.ustats.address, + &ustats, + p_hwfn->storm_stats.ustats.len); + + memset(&pstats, 0, sizeof(pstats)); + qed_memcpy_to(p_hwfn, p_ptt, + p_hwfn->storm_stats.pstats.address, + &pstats, + p_hwfn->storm_stats.pstats.len); + + qed_ptt_release(p_hwfn, p_ptt); + } + + /* PORT statistics are not necessarily reset, so we need to + * read and create a baseline for future statistics. + */ + if (!cdev->reset_stats) + DP_INFO(cdev, "Reset stats not allocated\n"); + else + __qed_get_vport_stats(cdev, cdev->reset_stats); +} + +int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, + u16 src_id, u16 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { + u16 min, max; + + min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); + max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); + DP_NOTICE(p_hwfn, + "l2_queue id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return -EINVAL; + } + + *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id; + + return 0; +} + +int qed_fw_vport(struct qed_hwfn *p_hwfn, + u8 src_id, u8 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { + u8 min, max; + + min = (u8)RESC_START(p_hwfn, QED_VPORT); + max = min + RESC_NUM(p_hwfn, QED_VPORT); + DP_NOTICE(p_hwfn, + "vport id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return -EINVAL; + } + + *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id; + + return 0; +} + +int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, + u8 src_id, u8 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) { + u8 min, max; + + min = (u8)RESC_START(p_hwfn, QED_RSS_ENG); + max = min + RESC_NUM(p_hwfn, QED_RSS_ENG); + DP_NOTICE(p_hwfn, + "rss_eng id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return -EINVAL; + } + + *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id; + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h new file mode 100644 index 000000000000..e29a3ba6c8b0 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -0,0 +1,283 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */
+
+#ifndef _QED_DEV_API_H
+#define _QED_DEV_API_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed_int.h"
+
+/**
+ * @brief qed_init_dp - initialize the debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+void qed_init_dp(struct qed_dev *cdev,
+                 u32 dp_module,
+                 u8 dp_level);
+
+/**
+ * @brief qed_init_struct - initialize the device structure to
+ * its defaults
+ *
+ * @param cdev
+ */
+void qed_init_struct(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_free -
+ *
+ * @param cdev
+ */
+void qed_resc_free(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_alloc -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_resc_alloc(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_setup -
+ *
+ * @param cdev
+ */
+void qed_resc_setup(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_init -
+ *
+ * @param cdev
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ * for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ * Pass NULL if not using binary fw file.
+ *
+ * @return int
+ */
+int qed_hw_init(struct qed_dev *cdev,
+                bool b_hw_start,
+                enum qed_int_mode int_mode,
+                bool allow_npar_tx_switch,
+                const u8 *bin_fw_data);
+
+/**
+ * @brief qed_hw_stop -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_stop(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_stop_fastpath - should be called in case
+ * slowpath is still required for the device,
+ * but fastpath is not.
+ *
+ * @param cdev
+ *
+ */
+void qed_hw_stop_fastpath(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_start_fastpath - restart fastpath traffic,
+ * only if hw_stop_fastpath was called
+ *
+ * @param cdev
+ *
+ */
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_hw_reset -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_reset(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_prepare -
+ *
+ * @param cdev
+ * @param personality - personality to initialize
+ *
+ * @return int
+ */
+int qed_hw_prepare(struct qed_dev *cdev,
+                   int personality);
+
+/**
+ * @brief qed_hw_remove -
+ *
+ * @param cdev
+ */
+void qed_hw_remove(struct qed_dev *cdev);
+
+/**
+ * @brief qed_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_ptt
+ */
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt);
+void qed_get_vport_stats(struct qed_dev *cdev,
+                         struct qed_eth_stats *stats);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+enum qed_dmae_address_type_t {
+	QED_DMAE_ADDRESS_HOST_VIRT,
+	QED_DMAE_ADDRESS_HOST_PHYS,
+	QED_DMAE_ADDRESS_GRC
+};
+
+/* value of flags: if the QED_DMAE_FLAG_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE and the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to destination address
+ * using DMA
+ */
+#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+
+struct qed_dmae_params {
+	u32 flags; /* consists of QED_DMAE_FLAG_* values */
+};
+
+/**
+ * @brief qed_dmae_host2grc - copy data from source addr to
+ * dmae registers using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+int
+qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt,
+                  u64 source_addr,
+                  u32 grc_addr,
+                  u32 size_in_dwords,
+                  u32 flags);
+
+/**
+ * @brief qed_chain_alloc - Allocate and initialize a chain
+ *
+ * @param p_hwfn
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return int
+ */
+int
+qed_chain_alloc(struct qed_dev *cdev,
+                enum qed_chain_use_mode intended_use,
+                enum qed_chain_mode mode,
+                u16 num_elems,
+                size_t elem_size,
+                struct qed_chain *p_chain);
+
+/**
+ * @brief qed_chain_free - Free chain DMA memory
+ *
+ * @param p_hwfn
+ * @param p_chain
+ */
+void qed_chain_free(struct qed_dev *cdev,
+                    struct qed_chain *p_chain);
+
+/**
+ * @brief qed_fw_l2_queue - Get absolute L2 queue ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+                    u16 src_id,
+                    u16 *dst_id);
+
+/**
+ * @brief qed_fw_vport - Get absolute vport ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+                 u8 src_id,
+                 u8 *dst_id);
+
+/**
+ * @brief qed_fw_rss_eng - Get absolute RSS engine ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+                   u8 src_id,
+                   u8 *dst_id);
+
+/**
+ * @brief qed_final_cleanup - Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ *
+ * @return int
+ */
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      u16 id);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h new file mode 100644 index 000000000000..b2f8e854dfd1 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -0,0 +1,5291 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */ + +#ifndef _QED_HSI_H +#define _QED_HSI_H + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/qed/common_hsi.h> +#include <linux/qed/eth_common.h> + +struct qed_hwfn; +struct qed_ptt; +/********************************/ +/* Add include to common target */ +/********************************/ + +/* opcodes for the event ring */ +enum common_event_opcode { + COMMON_EVENT_PF_START, + COMMON_EVENT_PF_STOP, + COMMON_EVENT_RESERVED, + COMMON_EVENT_RESERVED2, + COMMON_EVENT_RESERVED3, + COMMON_EVENT_RESERVED4, + COMMON_EVENT_RESERVED5, + MAX_COMMON_EVENT_OPCODE +}; + +/* Common Ramrod Command IDs */ +enum common_ramrod_cmd_id { + COMMON_RAMROD_UNUSED, + COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, + COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, + COMMON_RAMROD_RESERVED, + COMMON_RAMROD_RESERVED2, + COMMON_RAMROD_RESERVED3, + MAX_COMMON_RAMROD_CMD_ID +}; + +/* The core storm context for the Ystorm */ +struct ystorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* The core storm context for the Pstorm */ +struct pstorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* Core Slowpath Connection storm context of Xstorm */ +struct xstorm_core_conn_st_ctx { + __le32 spq_base_lo /* SPQ Ring Base Address low dword */; + __le32 spq_base_hi /* SPQ Ring Base Address high dword */; + struct regpair consolid_base_addr; + __le16 spq_cons /* SPQ Ring Consumer */; + __le16 consolid_cons /* Consolidation Ring Consumer */; + __le32 reserved0[55] /* Pad to 15 cycles */; +}; + +struct xstorm_core_conn_ag_ctx { + u8 reserved0 /* cdu_validation */; + u8 core_state /* state */; + u8 flags0; +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */ +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */ +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; 
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */ +#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */ +#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */ +#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */ +#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */ +#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */ +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */ +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */ +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */ +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */ +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define 
XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */ +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */ +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */ +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */ +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */ +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */ +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */ +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */ +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */ +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */ +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */ +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */ +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */ +#define 
XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */ +#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2 /* byte2 */; + __le16 physical_q0 /* physical_q0 */; + __le16 consolid_prod /* physical_q1 */; + __le16 reserved16 /* physical_q2 */; + __le16 tx_bd_cons /* word3 */; + __le16 tx_bd_or_spq_prod /* word4 */; + __le16 word5 /* word5 */; + __le16 conn_dpi /* conn_dpi */; + u8 byte3 /* byte3 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + u8 byte6 /* byte6 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* cf_array0 */; + __le32 reg6 /* cf_array1 */; + __le16 word7 /* word7 */; + __le16 word8 /* word8 */; + __le16 word9 /* word9 */; + __le16 word10 /* word10 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + __le32 reg9 /* reg9 */; + u8 byte7 /* byte7 */; + u8 byte8 /* byte8 */; + u8 byte9 /* byte9 */; + u8 byte10 /* byte10 */; + u8 byte11 /* byte11 */; + u8 byte12 /* byte12 */; + u8 byte13 /* byte13 */; + u8 byte14 /* byte14 */; + u8 byte15 /* byte15 */; + u8 byte16 /* byte16 */; + __le16 word11 /* word11 */; + __le32 reg10 /* reg10 */; + __le32 reg11 /* reg11 */; + __le32 reg12 /* reg12 */; + __le32 reg13 /* reg13 */; + __le32 reg14 /* reg14 */; + __le32 reg15 /* reg15 */; + __le32 reg16 /* reg16 */; + __le32 reg17 /* reg17 */; + __le32 reg18 /* reg18 */; + __le32 reg19 /* reg19 */; + __le16 word12 /* word12 */; + __le16 word13 /* word13 */; + __le16 word14 /* word14 */; + __le16 word15 /* word15 */; +}; + +/* The core storm context for the Mstorm */ +struct mstorm_core_conn_st_ctx { + 
__le32 reserved[24]; +}; + +/* The core storm context for the Ustorm */ +struct ustorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* core connection context */ +struct core_conn_context { + struct ystorm_core_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2] /* padding */; + struct pstorm_core_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_core_conn_st_ctx xstorm_st_context; + struct xstorm_core_conn_ag_ctx xstorm_ag_context; + struct mstorm_core_conn_st_ctx mstorm_st_context; + struct regpair mstorm_st_padding[2]; + struct ustorm_core_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2] /* padding */; +}; + +struct eth_mstorm_per_queue_stat { + struct regpair ttl0_discard; + struct regpair packet_too_big_discard; + struct regpair no_buff_discard; + struct regpair not_active_discard; + struct regpair tpa_coalesced_pkts; + struct regpair tpa_coalesced_events; + struct regpair tpa_aborts_num; + struct regpair tpa_coalesced_bytes; +}; + +struct eth_pstorm_per_queue_stat { + struct regpair sent_ucast_bytes; + struct regpair sent_mcast_bytes; + struct regpair sent_bcast_bytes; + struct regpair sent_ucast_pkts; + struct regpair sent_mcast_pkts; + struct regpair sent_bcast_pkts; + struct regpair error_drop_pkts; +}; + +struct eth_ustorm_per_queue_stat { + struct regpair rcv_ucast_bytes; + struct regpair rcv_mcast_bytes; + struct regpair rcv_bcast_bytes; + struct regpair rcv_ucast_pkts; + struct regpair rcv_mcast_pkts; + struct regpair rcv_bcast_pkts; +}; + +/* Event Ring Next Page Address */ +struct event_ring_next_addr { + struct regpair addr /* Next Page Address */; + __le32 reserved[2] /* Reserved */; +}; + +union event_ring_element { + struct event_ring_entry entry /* Event Ring Entry */; + struct event_ring_next_addr next_addr; +}; + +enum personality_type { + PERSONALITY_RESERVED, + PERSONALITY_RESERVED2, + PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */, + PERSONALITY_RESERVED3, + PERSONALITY_ETH /* Ethernet */, + PERSONALITY_RESERVED4, + MAX_PERSONALITY_TYPE +}; + +struct pf_start_tunnel_config { + u8 set_vxlan_udp_port_flg; + u8 set_geneve_udp_port_flg; + u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */; + u8 tx_enable_l2geneve; + u8 tx_enable_ipgeneve; + u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */; + u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */; + u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */; + u8 tunnel_clss_l2geneve; + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre; + u8 tunnel_clss_ipgre; + __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */; + __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. 
*/; +}; + +/* Ramrod data for PF start ramrod */ +struct pf_start_ramrod_data { + struct regpair event_ring_pbl_addr; + struct regpair consolid_q_pbl_addr; + struct pf_start_tunnel_config tunnel_config; + __le16 event_ring_sb_id; + u8 base_vf_id; + u8 num_vfs; + u8 event_ring_num_pages; + u8 event_ring_sb_index; + u8 path_id; + u8 warning_as_error; + u8 dont_log_ramrods; + u8 personality; + __le16 log_type_mask; + u8 mf_mode /* Multi function mode */; + u8 integ_phase /* Integration phase */; + u8 allow_npar_tx_switching; + u8 inner_to_outer_pri_map[8]; + u8 pri_map_valid; + u32 outer_tag; + u8 reserved0[4]; +}; + +enum ports_mode { + ENGX2_PORTX1 /* 2 engines x 1 port */, + ENGX2_PORTX2 /* 2 engines x 2 ports */, + ENGX1_PORTX1 /* 1 engine x 1 port */, + ENGX1_PORTX2 /* 1 engine x 2 ports */, + ENGX1_PORTX4 /* 1 engine x 4 ports */, + MAX_PORTS_MODE +}; + +/* Ramrod Header of SPQE */ +struct ramrod_header { + __le32 cid /* Slowpath Connection CID */; + u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */; + u8 protocol_id /* Ramrod Protocol ID */; + __le16 echo /* Ramrod echo */; +}; + +/* Slowpath Element (SPQE) */ +struct slow_path_element { + struct ramrod_header hdr /* Ramrod Header */; + struct regpair data_ptr; +}; + +struct tstorm_per_port_stat { + struct regpair trunc_error_discard; + struct regpair mac_error_discard; + struct regpair mftag_filter_discard; + struct regpair eth_mac_filter_discard; + struct regpair ll2_mac_filter_discard; + struct regpair ll2_conn_disabled_discard; + struct regpair iscsi_irregular_pkt; + struct regpair fcoe_irregular_pkt; + struct regpair roce_irregular_pkt; + struct regpair eth_irregular_pkt; + struct regpair toe_irregular_pkt; + struct regpair preroce_irregular_pkt; +}; + +struct atten_status_block { + __le32 atten_bits; + __le32 atten_ack; + __le16 reserved0; + __le16 sb_index /* status block running index */; + __le32 reserved1; +}; + +enum block_addr { + GRCBASE_GRC = 0x50000, + GRCBASE_MISCS = 0x9000, + GRCBASE_MISC = 0x8000, + GRCBASE_DBU = 0xa000, + GRCBASE_PGLUE_B = 0x2a8000, + GRCBASE_CNIG = 0x218000, + GRCBASE_CPMU = 0x30000, + GRCBASE_NCSI = 0x40000, + GRCBASE_OPTE = 0x53000, + GRCBASE_BMB = 0x540000, + GRCBASE_PCIE = 0x54000, + GRCBASE_MCP = 0xe00000, + GRCBASE_MCP2 = 0x52000, + GRCBASE_PSWHST = 0x2a0000, + GRCBASE_PSWHST2 = 0x29e000, + GRCBASE_PSWRD = 0x29c000, + GRCBASE_PSWRD2 = 0x29d000, + GRCBASE_PSWWR = 0x29a000, + GRCBASE_PSWWR2 = 0x29b000, + GRCBASE_PSWRQ = 0x280000, + GRCBASE_PSWRQ2 = 0x240000, + GRCBASE_PGLCS = 0x0, + GRCBASE_PTU = 0x560000, + GRCBASE_DMAE = 0xc000, + GRCBASE_TCM = 0x1180000, + GRCBASE_MCM = 0x1200000, + GRCBASE_UCM = 0x1280000, + GRCBASE_XCM = 0x1000000, + GRCBASE_YCM = 0x1080000, + GRCBASE_PCM = 0x1100000, + GRCBASE_QM = 0x2f0000, + GRCBASE_TM = 0x2c0000, + GRCBASE_DORQ = 0x100000, + GRCBASE_BRB = 0x340000, + GRCBASE_SRC = 0x238000, + GRCBASE_PRS = 0x1f0000, + GRCBASE_TSDM = 0xfb0000, + GRCBASE_MSDM = 0xfc0000, + GRCBASE_USDM = 0xfd0000, + GRCBASE_XSDM = 0xf80000, + GRCBASE_YSDM = 0xf90000, + GRCBASE_PSDM = 0xfa0000, + GRCBASE_TSEM = 0x1700000, + GRCBASE_MSEM = 0x1800000, + GRCBASE_USEM = 0x1900000, + GRCBASE_XSEM = 0x1400000, + GRCBASE_YSEM = 0x1500000, + GRCBASE_PSEM = 0x1600000, + GRCBASE_RSS = 0x238800, + GRCBASE_TMLD = 0x4d0000, + GRCBASE_MULD = 0x4e0000, + GRCBASE_YULD = 0x4c8000, + GRCBASE_XYLD = 0x4c0000, + GRCBASE_PRM = 0x230000, + GRCBASE_PBF_PB1 = 0xda0000, + GRCBASE_PBF_PB2 = 0xda4000, + GRCBASE_RPB = 0x23c000, + GRCBASE_BTB = 0xdb0000, + GRCBASE_PBF = 0xd80000, + GRCBASE_RDIF = 0x300000, + GRCBASE_TDIF 
= 0x310000, + GRCBASE_CDU = 0x580000, + GRCBASE_CCFC = 0x2e0000, + GRCBASE_TCFC = 0x2d0000, + GRCBASE_IGU = 0x180000, + GRCBASE_CAU = 0x1c0000, + GRCBASE_UMAC = 0x51000, + GRCBASE_XMAC = 0x210000, + GRCBASE_DBG = 0x10000, + GRCBASE_NIG = 0x500000, + GRCBASE_WOL = 0x600000, + GRCBASE_BMBN = 0x610000, + GRCBASE_IPC = 0x20000, + GRCBASE_NWM = 0x800000, + GRCBASE_NWS = 0x700000, + GRCBASE_MS = 0x6a0000, + GRCBASE_PHY_PCIE = 0x618000, + GRCBASE_MISC_AEU = 0x8000, + GRCBASE_BAR0_MAP = 0x1c00000, + MAX_BLOCK_ADDR +}; + +enum block_id { + BLOCK_GRC, + BLOCK_MISCS, + BLOCK_MISC, + BLOCK_DBU, + BLOCK_PGLUE_B, + BLOCK_CNIG, + BLOCK_CPMU, + BLOCK_NCSI, + BLOCK_OPTE, + BLOCK_BMB, + BLOCK_PCIE, + BLOCK_MCP, + BLOCK_MCP2, + BLOCK_PSWHST, + BLOCK_PSWHST2, + BLOCK_PSWRD, + BLOCK_PSWRD2, + BLOCK_PSWWR, + BLOCK_PSWWR2, + BLOCK_PSWRQ, + BLOCK_PSWRQ2, + BLOCK_PGLCS, + BLOCK_PTU, + BLOCK_DMAE, + BLOCK_TCM, + BLOCK_MCM, + BLOCK_UCM, + BLOCK_XCM, + BLOCK_YCM, + BLOCK_PCM, + BLOCK_QM, + BLOCK_TM, + BLOCK_DORQ, + BLOCK_BRB, + BLOCK_SRC, + BLOCK_PRS, + BLOCK_TSDM, + BLOCK_MSDM, + BLOCK_USDM, + BLOCK_XSDM, + BLOCK_YSDM, + BLOCK_PSDM, + BLOCK_TSEM, + BLOCK_MSEM, + BLOCK_USEM, + BLOCK_XSEM, + BLOCK_YSEM, + BLOCK_PSEM, + BLOCK_RSS, + BLOCK_TMLD, + BLOCK_MULD, + BLOCK_YULD, + BLOCK_XYLD, + BLOCK_PRM, + BLOCK_PBF_PB1, + BLOCK_PBF_PB2, + BLOCK_RPB, + BLOCK_BTB, + BLOCK_PBF, + BLOCK_RDIF, + BLOCK_TDIF, + BLOCK_CDU, + BLOCK_CCFC, + BLOCK_TCFC, + BLOCK_IGU, + BLOCK_CAU, + BLOCK_UMAC, + BLOCK_XMAC, + BLOCK_DBG, + BLOCK_NIG, + BLOCK_WOL, + BLOCK_BMBN, + BLOCK_IPC, + BLOCK_NWM, + BLOCK_NWS, + BLOCK_MS, + BLOCK_PHY_PCIE, + BLOCK_MISC_AEU, + BLOCK_BAR0_MAP, + MAX_BLOCK_ID +}; + +enum command_type_bit { + IGU_COMMAND_TYPE_NOP = 0, + IGU_COMMAND_TYPE_SET = 1, + MAX_COMMAND_TYPE_BIT +}; + +struct dmae_cmd { + __le32 opcode; +#define DMAE_CMD_SRC_MASK 0x1 +#define DMAE_CMD_SRC_SHIFT 0 +#define DMAE_CMD_DST_MASK 0x3 +#define DMAE_CMD_DST_SHIFT 1 +#define DMAE_CMD_C_DST_MASK 0x1 +#define DMAE_CMD_C_DST_SHIFT 3 +#define DMAE_CMD_CRC_RESET_MASK 0x1 +#define DMAE_CMD_CRC_RESET_SHIFT 4 +#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5 +#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6 +#define DMAE_CMD_COMP_FUNC_MASK 0x1 +#define DMAE_CMD_COMP_FUNC_SHIFT 7 +#define DMAE_CMD_COMP_WORD_EN_MASK 0x1 +#define DMAE_CMD_COMP_WORD_EN_SHIFT 8 +#define DMAE_CMD_COMP_CRC_EN_MASK 0x1 +#define DMAE_CMD_COMP_CRC_EN_SHIFT 9 +#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7 +#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10 +#define DMAE_CMD_RESERVED1_MASK 0x1 +#define DMAE_CMD_RESERVED1_SHIFT 13 +#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3 +#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14 +#define DMAE_CMD_ERR_HANDLING_MASK 0x3 +#define DMAE_CMD_ERR_HANDLING_SHIFT 16 +#define DMAE_CMD_PORT_ID_MASK 0x3 +#define DMAE_CMD_PORT_ID_SHIFT 18 +#define DMAE_CMD_SRC_PF_ID_MASK 0xF +#define DMAE_CMD_SRC_PF_ID_SHIFT 20 +#define DMAE_CMD_DST_PF_ID_MASK 0xF +#define DMAE_CMD_DST_PF_ID_SHIFT 24 +#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 +#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28 +#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 +#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29 +#define DMAE_CMD_RESERVED2_MASK 0x3 +#define DMAE_CMD_RESERVED2_SHIFT 30 + __le32 src_addr_lo; + __le32 src_addr_hi; + __le32 dst_addr_lo; + __le32 dst_addr_hi; + __le16 length /* Length in DW */; + __le16 opcode_b; +#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */ +#define DMAE_CMD_SRC_VF_ID_SHIFT 0 +#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */ 
+#define DMAE_CMD_DST_VF_ID_SHIFT 8 + __le32 comp_addr_lo /* PCIe completion address low or grc address */; + __le32 comp_addr_hi; + __le32 comp_val /* Value to write to copmletion address */; + __le32 crc32 /* crc16 result */; + __le32 crc_32_c /* crc32_c result */; + __le16 crc16 /* crc16 result */; + __le16 crc16_c /* crc16_c result */; + __le16 crc10 /* crc_t10 result */; + __le16 reserved; + __le16 xsum16 /* checksum16 result */; + __le16 xsum8 /* checksum8 result */; +}; + +struct igu_cleanup { + __le32 sb_id_and_flags; +#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF +#define IGU_CLEANUP_RESERVED0_SHIFT 0 +#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */ +#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27 +#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7 +#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28 +#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1 +#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31 + __le32 reserved1; +}; + +union igu_command { + struct igu_prod_cons_update prod_cons_update; + struct igu_cleanup cleanup; +}; + +struct igu_command_reg_ctrl { + __le16 opaque_fid; + __le16 igu_command_reg_ctrl_fields; +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0 +#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7 +#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15 +}; + +struct igu_mapping_line { + __le32 igu_mapping_line_fields; +#define IGU_MAPPING_LINE_VALID_MASK 0x1 +#define IGU_MAPPING_LINE_VALID_SHIFT 0 +#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1 +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9 +#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */ +#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17 +#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F +#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18 +#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF +#define IGU_MAPPING_LINE_RESERVED_SHIFT 24 +}; + +struct igu_msix_vector { + struct regpair address; + __le32 data; + __le32 msix_vector_fields; +#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1 +#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0 +#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF +#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1 +#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF +#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16 +#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF +#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 +}; + +enum init_modes { + MODE_BB_A0, + MODE_RESERVED, + MODE_RESERVED2, + MODE_ASIC, + MODE_RESERVED3, + MODE_RESERVED4, + MODE_RESERVED5, + MODE_SF, + MODE_MF_SD, + MODE_MF_SI, + MODE_PORTS_PER_ENG_1, + MODE_PORTS_PER_ENG_2, + MODE_PORTS_PER_ENG_4, + MODE_40G, + MODE_100G, + MODE_EAGLE_ENG1_WORKAROUND, + MAX_INIT_MODES +}; + +enum init_phases { + PHASE_ENGINE, + PHASE_PORT, + PHASE_PF, + PHASE_RESERVED, + PHASE_QM_PF, + MAX_INIT_PHASES +}; + +struct mstorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ +#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* 
cf2 */ +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0 /* word0 */; + __le16 word1 /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; +}; + +/* per encapsulation type enabling flags */ +struct prs_reg_encapsulation_type_en { + u8 flags; +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6 +}; + +enum pxp_tph_st_hint { + TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */, + TPH_ST_HINT_REQUESTER /* Read/Write access by Device */, + TPH_ST_HINT_TARGET, + TPH_ST_HINT_TARGET_PRIO, + MAX_PXP_TPH_ST_HINT +}; + +/* QM hardware structure of enable bypass credit mask */ +struct qm_rf_bypass_mask { + u8 flags; +#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1 +#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2 +#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3 +#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4 +#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7 +}; + +/* QM hardware structure of opportunistic credit mask */ +struct qm_rf_opportunistic_mask { + __le16 flags; +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1 +#define 
QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9 +}; + +/* QM hardware structure of QM map memory */ +struct qm_rf_pq_map { + u32 reg; +#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */ +#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */ +#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */ +#define QM_RF_PQ_MAP_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */ +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */ +#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 +}; + +/* SDM operation gen command (generate aggregative interrupt) */ +struct sdm_op_gen { + __le32 command; +#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */ +#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 +#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */ +#define SDM_OP_GEN_COMP_TYPE_SHIFT 16 +#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */ +#define SDM_OP_GEN_RESERVED_SHIFT 20 +}; + +struct tstorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define 
TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 word1 /* word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; + __le32 reg10 /* reg10 */; +}; + +struct ustorm_core_conn_ag_ctx { + u8 reserved /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define 
USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 word1 /* word1 */; + __le32 rx_producers /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; +}; + +struct ystorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ +#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ +#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ +#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* 
rule0en */ +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le16 word1 /* word1 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; + __le16 word4 /* word4 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; +}; + +/*********************************** Init ************************************/ + +/* Width of GRC address in bits (addresses are specified in dwords) */ +#define GRC_ADDR_BITS 23 +#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1) + +/* indicates an init that should be applied to any phase ID */ +#define ANY_PHASE_ID 0xffff + +/* init pattern size in bytes */ +#define INIT_PATTERN_SIZE_BITS 4 +#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS) + +/* Max size in dwords of a zipped array */ +#define MAX_ZIPPED_SIZE 8192 + +/* Global PXP window */ +#define NUM_OF_PXP_WIN 19 +#define PXP_WIN_DWORD_SIZE_BITS 10 +#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS) +#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2) +#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4) + +/********************************* GRC Dump **********************************/ + +/* width of GRC dump register sequence length in bits */ +#define DUMP_SEQ_LEN_BITS 8 +#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1) + +/* width of GRC dump memory length in bits */ +#define DUMP_MEM_LEN_BITS 18 +#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1) + +/* width of register type ID in bits */ +#define REG_TYPE_ID_BITS 6 +#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1) + +/* width of block ID in bits */ +#define BLOCK_ID_BITS 8 +#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1) + +/******************************** Idle Check *********************************/ + +/* max number of idle check predicate immediates */ +#define MAX_IDLE_CHK_PRED_IMM 3 + +/* max number of idle check argument registers */ +#define MAX_IDLE_CHK_READ_REGS 3 + +/* max number of idle check loops */ +#define MAX_IDLE_CHK_LOOPS 0x10000 + +/* max idle check address increment */ +#define MAX_IDLE_CHK_INCREMENT 0x10000 + +/* inicates an undefined idle check line index */ +#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff + +/* max number of register values following the idle check header */ +#define IDLE_CHK_MAX_DUMP_REGS 2 + +/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */ +#define IDLE_CHK_QM_RD_WR_PTR 0 +#define IDLE_CHK_QM_RD_WR_BANK 1 + +/**************************************/ +/* HSI Functions constants and macros */ +/**************************************/ + +/* Number of VLAN priorities */ +#define NUM_OF_VLAN_PRIORITIES 8 + +/* the MCP Trace meta data signautre is duplicated in the perl script that + * generats the NVRAM images. 
+ */ +#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa + +/* Binary buffer header */ +struct bin_buffer_hdr { + u32 offset; + u32 length /* buffer length in bytes */; +}; + +/* binary buffer types */ +enum bin_buffer_type { + BIN_BUF_FW_VER_INFO /* fw_ver_info struct */, + BIN_BUF_INIT_CMD /* init commands */, + BIN_BUF_INIT_VAL /* init data */, + BIN_BUF_INIT_MODE_TREE /* init modes tree */, + BIN_BUF_IRO /* internal RAM offsets array */, + MAX_BIN_BUFFER_TYPE +}; + +/* Chip IDs */ +enum chip_ids { + CHIP_BB_A0 /* BB A0 chip ID */, + CHIP_BB_B0 /* BB B0 chip ID */, + CHIP_K2 /* AH chip ID */, + MAX_CHIP_IDS +}; + +enum idle_chk_severity_types { + IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */, + IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, + IDLE_CHK_SEVERITY_WARNING, + MAX_IDLE_CHK_SEVERITY_TYPES +}; + +struct init_array_raw_hdr { + __le32 data; +#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */ +#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4 +}; + +struct init_array_standard_hdr { + __le32 data; +#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4 +}; + +struct init_array_zipped_hdr { + __le32 data; +#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4 +}; + +struct init_array_pattern_hdr { + __le32 data; +#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF +#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4 +#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF +#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8 +}; + +union init_array_hdr { + struct init_array_raw_hdr raw /* raw init array header */; + struct init_array_standard_hdr standard; + struct init_array_zipped_hdr zipped /* zipped init array header */; + struct init_array_pattern_hdr pattern /* pattern init array header */; +}; + +enum init_array_types { + INIT_ARR_STANDARD /* standard init array */, + INIT_ARR_ZIPPED /* zipped init array */, + INIT_ARR_PATTERN /* a repeated pattern */, + MAX_INIT_ARRAY_TYPES +}; + +/* init operation: callback */ +struct init_callback_op { + __le32 op_data; +#define INIT_CALLBACK_OP_OP_MASK 0xF +#define INIT_CALLBACK_OP_OP_SHIFT 0 +#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_CALLBACK_OP_RESERVED_SHIFT 4 + __le16 callback_id /* Callback ID */; + __le16 block_id /* Blocks ID */; +}; + +/* init comparison types */ +enum init_comparison_types { + INIT_COMPARISON_EQ /* init value is included in the init command */, + INIT_COMPARISON_OR /* init value is all zeros */, + INIT_COMPARISON_AND /* init value is an array of values */, + MAX_INIT_COMPARISON_TYPES +}; + +/* init operation: delay */ +struct init_delay_op { + __le32 op_data; +#define INIT_DELAY_OP_OP_MASK 0xF +#define INIT_DELAY_OP_OP_SHIFT 0 +#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_DELAY_OP_RESERVED_SHIFT 4 + __le32 delay /* delay in us */; +}; + +/* init operation: if_mode */ +struct init_if_mode_op { + __le32 op_data; +#define INIT_IF_MODE_OP_OP_MASK 0xF +#define INIT_IF_MODE_OP_OP_SHIFT 0 +#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF +#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 +#define 
INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 + __le16 reserved2; + __le16 modes_buf_offset; +}; + +/* init operation: if_phase */ +struct init_if_phase_op { + __le32 op_data; +#define INIT_IF_PHASE_OP_OP_MASK 0xF +#define INIT_IF_PHASE_OP_OP_SHIFT 0 +#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1 +#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4 +#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF +#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5 +#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 + __le32 phase_data; +#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */ +#define INIT_IF_PHASE_OP_PHASE_SHIFT 0 +#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF +#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8 +#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */ +#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16 +}; + +/* init mode operators */ +enum init_mode_ops { + INIT_MODE_OP_NOT /* init mode not operator */, + INIT_MODE_OP_OR /* init mode or operator */, + INIT_MODE_OP_AND /* init mode and operator */, + MAX_INIT_MODE_OPS +}; + +/* init operation: raw */ +struct init_raw_op { + __le32 op_data; +#define INIT_RAW_OP_OP_MASK 0xF +#define INIT_RAW_OP_OP_SHIFT 0 +#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */ +#define INIT_RAW_OP_PARAM1_SHIFT 4 + __le32 param2 /* Init param 2 */; +}; + +/* init array params */ +struct init_op_array_params { + __le16 size /* array size in dwords */; + __le16 offset /* array start offset in dwords */; +}; + +/* Write init operation arguments */ +union init_write_args { + __le32 inline_val; + __le32 zeros_count; + __le32 array_offset; + struct init_op_array_params runtime; +}; + +/* init operation: write */ +struct init_write_op { + __le32 data; +#define INIT_WRITE_OP_OP_MASK 0xF +#define INIT_WRITE_OP_OP_SHIFT 0 +#define INIT_WRITE_OP_SOURCE_MASK 0x7 +#define INIT_WRITE_OP_SOURCE_SHIFT 4 +#define INIT_WRITE_OP_RESERVED_MASK 0x1 +#define INIT_WRITE_OP_RESERVED_SHIFT 7 +#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1 +#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8 +#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_WRITE_OP_ADDRESS_SHIFT 9 + union init_write_args args /* Write init operation arguments */; +}; + +/* init operation: read */ +struct init_read_op { + __le32 op_data; +#define INIT_READ_OP_OP_MASK 0xF +#define INIT_READ_OP_OP_SHIFT 0 +#define INIT_READ_OP_POLL_COMP_MASK 0x7 +#define INIT_READ_OP_POLL_COMP_SHIFT 4 +#define INIT_READ_OP_RESERVED_MASK 0x1 +#define INIT_READ_OP_RESERVED_SHIFT 7 +#define INIT_READ_OP_POLL_MASK 0x1 +#define INIT_READ_OP_POLL_SHIFT 8 +#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_READ_OP_ADDRESS_SHIFT 9 + __le32 expected_val; +}; + +/* Init operations union */ +union init_op { + struct init_raw_op raw /* raw init operation */; + struct init_write_op write /* write init operation */; + struct init_read_op read /* read init operation */; + struct init_if_mode_op if_mode /* if_mode init operation */; + struct init_if_phase_op if_phase /* if_phase init operation */; + struct init_callback_op callback /* callback init operation */; + struct init_delay_op delay /* delay init operation */; +}; + +/* Init command operation types */ +enum init_op_types { + INIT_OP_READ /* GRC read init command */, + INIT_OP_WRITE /* GRC write init command */, + INIT_OP_IF_MODE, + INIT_OP_IF_PHASE, + INIT_OP_DELAY /* delay init command */, + INIT_OP_CALLBACK /* callback init command */, + MAX_INIT_OP_TYPES +}; + +/* init source types */ +enum init_source_types { + 
INIT_SRC_INLINE /* init value is included in the init command */, + INIT_SRC_ZEROS /* init value is all zeros */, + INIT_SRC_ARRAY /* init value is an array of values */, + INIT_SRC_RUNTIME /* init value is provided during runtime */, + MAX_INIT_SOURCE_TYPES +}; + +/* Internal RAM Offsets macro data */ +struct iro { + u32 base /* RAM field offset */; + u16 m1 /* multiplier 1 */; + u16 m2 /* multiplier 2 */; + u16 m3 /* multiplier 3 */; + u16 size /* RAM field size */; +}; + +/* QM per-port init parameters */ +struct init_qm_port_params { + u8 active /* Indicates if this port is active */; + u8 num_active_phys_tcs; + u16 num_pbf_cmd_lines; + u16 num_btb_blocks; + __le16 reserved; +}; + +/* QM per-PQ init parameters */ +struct init_qm_pq_params { + u8 vport_id /* VPORT ID */; + u8 tc_id /* TC ID */; + u8 wrr_group /* WRR group */; + u8 reserved; +}; + +/* QM per-vport init parameters */ +struct init_qm_vport_params { + u32 vport_rl; + u16 vport_wfq; + u16 first_tx_pq_id[NUM_OF_TCS]; +}; + +/* Win 2 */ +#define GTT_BAR0_MAP_REG_IGU_CMD \ + 0x00f000UL +/* Win 3 */ +#define GTT_BAR0_MAP_REG_TSDM_RAM \ + 0x010000UL +/* Win 4 */ +#define GTT_BAR0_MAP_REG_MSDM_RAM \ + 0x011000UL +/* Win 5 */ +#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \ + 0x012000UL +/* Win 6 */ +#define GTT_BAR0_MAP_REG_USDM_RAM \ + 0x013000UL +/* Win 7 */ +#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \ + 0x014000UL +/* Win 8 */ +#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \ + 0x015000UL +/* Win 9 */ +#define GTT_BAR0_MAP_REG_XSDM_RAM \ + 0x016000UL +/* Win 10 */ +#define GTT_BAR0_MAP_REG_YSDM_RAM \ + 0x017000UL +/* Win 11 */ +#define GTT_BAR0_MAP_REG_PSDM_RAM \ + 0x018000UL + +/** + * @brief qed_qm_pf_mem_size - prepare QM ILT sizes + * + * Returns the required host memory size in 4KB units. + * Must be called before all QM init HSI functions. + * + * @param pf_id - physical function ID + * @param num_pf_cids - number of connections used by this PF + * @param num_vf_cids - number of connections used by VFs of this PF + * @param num_tids - number of tasks used by this PF + * @param num_pf_pqs - number of PQs used by this PF + * @param num_vf_pqs - number of PQs used by VFs of this PF + * + * @return The required host memory size in 4KB units. + */ +u32 qed_qm_pf_mem_size(u8 pf_id, + u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, + u16 num_pf_pqs, + u16 num_vf_pqs); + +struct qed_qm_common_rt_init_params { + u8 max_ports_per_engine; + u8 max_phys_tcs_per_port; + bool pf_rl_en; + bool pf_wfq_en; + bool vport_rl_en; + bool vport_wfq_en; + struct init_qm_port_params *port_params; +}; + +/** + * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the + * engine phase. + * + * @param p_hwfn + * @param max_ports_per_engine - max number of ports per engine in HW + * @param max_phys_tcs_per_port - max number of physical TCs per port in HW + * @param pf_rl_en - enable per-PF rate limiters + * @param pf_wfq_en - enable per-PF WFQ + * @param vport_rl_en - enable per-VPORT rate limiters + * @param vport_wfq_en - enable per-VPORT WFQ + * @param port_params - array of size MAX_NUM_PORTS with + * arameters for each port + * + * @return 0 on success, -1 on error. 
+ */ +int qed_qm_common_rt_init( + struct qed_hwfn *p_hwfn, + struct qed_qm_common_rt_init_params *p_params); + +struct qed_qm_pf_rt_init_params { + u8 port_id; + u8 pf_id; + u8 max_phys_tcs_per_port; + bool is_first_pf; + u32 num_pf_cids; + u32 num_vf_cids; + u32 num_tids; + u16 start_pq; + u16 num_pf_pqs; + u16 num_vf_pqs; + u8 start_vport; + u8 num_vports; + u8 pf_wfq; + u32 pf_rl; + struct init_qm_pq_params *pq_params; + struct init_qm_vport_params *vport_params; +}; + +int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params); + +/** + * @brief qed_init_pf_rl Initializes the rate limit of the specified PF + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param pf_id - PF ID + * @param pf_rl - rate limit in Mb/sec units + * + * @return 0 on success, -1 on error. + */ +int qed_init_pf_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 pf_id, + u32 pf_rl); + +/** + * @brief qed_init_vport_rl Initializes the rate limit of the specified VPORT + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param vport_id - VPORT ID + * @param vport_rl - rate limit in Mb/sec units + * + * @return 0 on success, -1 on error. + */ + +int qed_init_vport_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 vport_id, + u32 vport_rl); +/** + * @brief qed_send_qm_stop_cmd Sends a stop command to the QM + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param is_release_cmd - true for release, false for stop. + * @param is_tx_pq - true for Tx PQs, false for Other PQs. + * @param start_pq - first PQ ID to stop + * @param num_pqs - Number of PQs to stop, starting from start_pq. + * + * @return bool, true if successful, false if timeout occurred while waiting + * for QM command done. + */ + +bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool is_release_cmd, + bool is_tx_pq, + u16 start_pq, + u16 num_pqs); + +/* Ystorm flow control mode. 
Use enum fw_flow_ctrl_mode */ +#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) +#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) +/* Tstorm port statistics */ +#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + \ + ((port_id) * \ + IRO[1].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +/* Ustorm VF-PF Channel ready flag */ +#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[2].base + \ + ((vf_id) * \ + IRO[2].m1)) +#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[2].size) +/* Ustorm Final flr cleanup ack */ +#define USTORM_FLR_FINAL_ACK_OFFSET (IRO[3].base) +#define USTORM_FLR_FINAL_ACK_SIZE (IRO[3].size) +/* Ustorm Event ring consumer */ +#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[4].base + \ + ((pf_id) * \ + IRO[4].m1)) +#define USTORM_EQE_CONS_SIZE (IRO[4].size) +/* Ustorm Completion ring consumer */ +#define USTORM_CQ_CONS_OFFSET(global_queue_id) (IRO[5].base + \ + ((global_queue_id) * \ + IRO[5].m1)) +#define USTORM_CQ_CONS_SIZE (IRO[5].size) +/* Xstorm Integration Test Data */ +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[6].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[6].size) +/* Ystorm Integration Test Data */ +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size) +/* Pstorm Integration Test Data */ +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) +/* Tstorm Integration Test Data */ +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) +/* Mstorm Integration Test Data */ +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) +/* Ustorm Integration Test Data */ +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) +/* Tstorm producers */ +#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[12].base + \ + ((core_rx_queue_id) * \ + IRO[12].m1)) +#define TSTORM_LL2_RX_PRODS_SIZE (IRO[12].size) +/* Tstorm LiteL2 queue statistics */ +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \ + ((core_rx_q_id) * \ + IRO[13].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[13].size) +/* Ustorm LiteL2 queue statistics */ +#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \ + ((core_rx_q_id) * \ + IRO[14].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[14].size) +/* Pstorm LiteL2 queue statistics */ +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \ + ((core_txst_id) * \ + IRO[15].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) +/* Mstorm queue statistics */ +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \ + ((stat_counter_id) * \ + IRO[16].m1)) +#define MSTORM_QUEUE_STAT_SIZE (IRO[16].size) +/* Mstorm producers */ +#define MSTORM_PRODS_OFFSET(queue_id) (IRO[17].base + \ + ((queue_id) * \ + IRO[17].m1)) +#define MSTORM_PRODS_SIZE (IRO[17].size) +/* TPA agregation timeout in us resolution (on ASIC) */ +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[18].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[18].size) +/* Ustorm queue statistics */ +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[19].base + \ + ((stat_counter_id) * \ + IRO[19].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[19].size) +/* Ustorm queue zone */ +#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[20].base + \ + ((queue_id) * \ + IRO[20].m1)) +#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[20].size) +/* Pstorm queue statistics */ 
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[21].base + \ + ((stat_counter_id) * \ + IRO[21].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[21].size) +/* Tstorm last parser message */ +#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id) (IRO[22].base + \ + ((pf_id) * \ + IRO[22].m1)) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[22].size) +/* Ystorm queue zone */ +#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[23].base + \ + ((queue_id) * \ + IRO[23].m1)) +#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[23].size) +/* Ystorm cqe producer */ +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[24].base + \ + ((rss_id) * \ + IRO[24].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[24].size) +/* Ustorm cqe producer */ +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[25].base + \ + ((rss_id) * \ + IRO[25].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[25].size) +/* Ustorm grq producer */ +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[26].base + \ + ((pf_id) * \ + IRO[26].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[26].size) +/* Tstorm cmdq-cons of given command queue-id */ +#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[27].base + \ + ((cmdq_queue_id) * \ + IRO[27].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[27].size) +/* Mstorm rq-cons of given queue-id */ +#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) (IRO[28].base + \ + ((rq_queue_id) * \ + IRO[28].m1)) +#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[28].size) +/* Pstorm RoCE statistics */ +#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[29].base + \ + ((stat_counter_id) * \ + IRO[29].m1)) +#define PSTORM_ROCE_STAT_SIZE (IRO[29].size) +/* Tstorm RoCE statistics */ +#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \ + ((stat_counter_id) * \ + IRO[30].m1)) +#define TSTORM_ROCE_STAT_SIZE (IRO[30].size) + +static const struct iro iro_arr[31] = { + { 0x10, 0x0, 0x0, 0x0, 0x8 }, + { 0x4448, 0x60, 0x0, 0x0, 0x60 }, + { 0x498, 0x8, 0x0, 0x0, 0x4 }, + { 0x494, 0x0, 0x0, 0x0, 0x4 }, + { 0x10, 0x8, 0x0, 0x0, 0x2 }, + { 0x90, 0x8, 0x0, 0x0, 0x2 }, + { 0x4540, 0x0, 0x0, 0x0, 0xf8 }, + { 0x39e0, 0x0, 0x0, 0x0, 0xf8 }, + { 0x2598, 0x0, 0x0, 0x0, 0xf8 }, + { 0x4350, 0x0, 0x0, 0x0, 0xf8 }, + { 0x52d0, 0x0, 0x0, 0x0, 0xf8 }, + { 0x7a48, 0x0, 0x0, 0x0, 0xf8 }, + { 0x100, 0x8, 0x0, 0x0, 0x8 }, + { 0x5808, 0x10, 0x0, 0x0, 0x10 }, + { 0xb100, 0x30, 0x0, 0x0, 0x30 }, + { 0x95c0, 0x30, 0x0, 0x0, 0x30 }, + { 0x54f8, 0x40, 0x0, 0x0, 0x40 }, + { 0x200, 0x10, 0x0, 0x0, 0x8 }, + { 0x9e70, 0x0, 0x0, 0x0, 0x4 }, + { 0x7ca0, 0x40, 0x0, 0x0, 0x30 }, + { 0xd00, 0x8, 0x0, 0x0, 0x8 }, + { 0x2790, 0x80, 0x0, 0x0, 0x38 }, + { 0xa520, 0xf0, 0x0, 0x0, 0xf0 }, + { 0x80, 0x8, 0x0, 0x0, 0x8 }, + { 0xac0, 0x8, 0x0, 0x0, 0x8 }, + { 0x2580, 0x8, 0x0, 0x0, 0x8 }, + { 0x2500, 0x8, 0x0, 0x0, 0x8 }, + { 0x440, 0x8, 0x0, 0x0, 0x2 }, + { 0x1800, 0x8, 0x0, 0x0, 0x2 }, + { 0x27c8, 0x80, 0x0, 0x0, 0x10 }, + { 0x4710, 0x10, 0x0, 0x0, 0x10 }, +}; + +/* Runtime array offsets */ +#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 +#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 +#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 +#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 +#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 +#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 +#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 +#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 +#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 +#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 +#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 +#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 +#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 +#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 +#define 
DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 +#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 +#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 17 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 18 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 19 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 20 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 21 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 22 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 23 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1496 +#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 +#define CAU_REG_PI_MEMORY_RT_OFFSET 2232 +#define CAU_REG_PI_MEMORY_RT_SIZE 4416 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6648 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6649 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6650 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6651 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6652 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 6653 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6654 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6655 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6656 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6657 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6658 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6659 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6660 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6661 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6662 +#define SRC_REG_FIRSTFREE_RT_OFFSET 6663 +#define SRC_REG_FIRSTFREE_RT_SIZE 2 +#define SRC_REG_LASTFREE_RT_OFFSET 6665 +#define SRC_REG_LASTFREE_RT_SIZE 2 +#define SRC_REG_COUNTFREE_RT_OFFSET 6667 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6668 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6669 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6670 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6671 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6672 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6673 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6674 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6675 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6676 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6677 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6678 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6679 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6680 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6681 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6682 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6683 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6684 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6685 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6686 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6687 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6689 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6690 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6691 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6692 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6693 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6694 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6695 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6696 +#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6697 +#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6698 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6699 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6700 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6701 +#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 +#define 
PGLUE_REG_B_VF_BASE_RT_OFFSET 28701 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28702 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28703 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28704 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28705 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28706 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28707 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28708 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28709 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28710 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28711 +#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29127 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29639 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29640 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29641 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29642 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29643 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29644 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29645 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29646 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29647 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29648 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29649 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29650 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29651 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29652 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29653 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29654 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29655 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29656 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29657 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29658 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29659 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29660 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29661 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29662 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29663 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29664 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29665 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29666 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29667 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29668 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29669 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29670 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29671 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29672 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29673 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29674 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29675 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29676 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29677 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29678 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29679 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29680 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29681 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29682 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29683 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29684 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29685 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29686 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29687 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29688 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29689 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29690 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29691 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29692 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29693 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29694 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29695 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29696 
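
[Editorial aside, not part of the patch.] The *_RT_OFFSET and *_RT_SIZE constants in this "Runtime array offsets" block are dword indices into a single host-side staging array (its total length, RUNTIME_ARRAY_SIZE, is defined at the end of the list) that the driver fills before the init commands run; init write ops with INIT_SRC_RUNTIME then pull their values from that array via init_op_array_params. The sketch below only illustrates that indexing convention; the rt_data, rt_store and rt_store_block names are hypothetical, not part of the qed API.

/*
 * Illustrative sketch, assuming this header is included for the
 * *_RT_OFFSET and RUNTIME_ARRAY_SIZE definitions and that u32 comes
 * from <linux/types.h> in kernel context.
 */
static u32 rt_data[RUNTIME_ARRAY_SIZE];	/* host-side runtime staging array */

/* Hypothetical helper: stage one dword at its runtime-array offset. */
static void rt_store(u32 rt_offset, u32 val)
{
	rt_data[rt_offset] = val;
}

/* Hypothetical helper: stage 'size' consecutive dwords starting at 'rt_offset'. */
static void rt_store_block(u32 rt_offset, const u32 *vals, u32 size)
{
	u32 i;

	for (i = 0; i < size; i++)
		rt_data[rt_offset + i] = vals[i];
}

static void example_stage_qm_runtime_values(void)
{
	/* Example values are arbitrary; real values come from the QM init code. */
	rt_store(QM_REG_MAXPQSIZE_0_RT_OFFSET, 0x100);
	rt_store(QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET, 0);
}

Single-register offsets occupy one slot, while the entries that also define a *_RT_SIZE (for example CAU_REG_PI_MEMORY_RT_OFFSET with its 4416-dword size) reserve a contiguous block of slots, which is what the rt_store_block() sketch above is meant to mirror.
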
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29697 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29698 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29699 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29700 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29701 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29702 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29703 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29704 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29705 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29706 +#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 +#define QM_REG_VOQCRDLINE_RT_OFFSET 29834 +#define QM_REG_VOQCRDLINE_RT_SIZE 20 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29854 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29874 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29875 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29876 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29877 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29878 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29879 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29880 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29881 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29882 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29883 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29884 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29885 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29886 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29887 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29888 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29889 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29890 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29891 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29892 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29893 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29894 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29895 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29896 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29897 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29898 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29899 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29900 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29901 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29902 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29903 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29904 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29905 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29906 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29907 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29908 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29909 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29910 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29911 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29912 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29913 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29914 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29915 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29916 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29917 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29918 +#define QM_REG_PQTX2PF_18_RT_OFFSET 29919 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29920 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29921 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29922 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29923 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29924 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29925 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29926 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29927 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29928 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29929 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29930 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29931 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29932 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29933 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29934 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29935 +#define 
QM_REG_PQTX2PF_35_RT_OFFSET 29936 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29937 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29938 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29939 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29940 +#define QM_REG_PQTX2PF_40_RT_OFFSET 29941 +#define QM_REG_PQTX2PF_41_RT_OFFSET 29942 +#define QM_REG_PQTX2PF_42_RT_OFFSET 29943 +#define QM_REG_PQTX2PF_43_RT_OFFSET 29944 +#define QM_REG_PQTX2PF_44_RT_OFFSET 29945 +#define QM_REG_PQTX2PF_45_RT_OFFSET 29946 +#define QM_REG_PQTX2PF_46_RT_OFFSET 29947 +#define QM_REG_PQTX2PF_47_RT_OFFSET 29948 +#define QM_REG_PQTX2PF_48_RT_OFFSET 29949 +#define QM_REG_PQTX2PF_49_RT_OFFSET 29950 +#define QM_REG_PQTX2PF_50_RT_OFFSET 29951 +#define QM_REG_PQTX2PF_51_RT_OFFSET 29952 +#define QM_REG_PQTX2PF_52_RT_OFFSET 29953 +#define QM_REG_PQTX2PF_53_RT_OFFSET 29954 +#define QM_REG_PQTX2PF_54_RT_OFFSET 29955 +#define QM_REG_PQTX2PF_55_RT_OFFSET 29956 +#define QM_REG_PQTX2PF_56_RT_OFFSET 29957 +#define QM_REG_PQTX2PF_57_RT_OFFSET 29958 +#define QM_REG_PQTX2PF_58_RT_OFFSET 29959 +#define QM_REG_PQTX2PF_59_RT_OFFSET 29960 +#define QM_REG_PQTX2PF_60_RT_OFFSET 29961 +#define QM_REG_PQTX2PF_61_RT_OFFSET 29962 +#define QM_REG_PQTX2PF_62_RT_OFFSET 29963 +#define QM_REG_PQTX2PF_63_RT_OFFSET 29964 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29965 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29966 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29967 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29968 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29969 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29970 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29971 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29972 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29973 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29974 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29975 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29976 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29977 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29978 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29979 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29980 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29981 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29982 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29983 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29984 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29985 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29986 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29987 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29988 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29989 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29990 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29991 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29992 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29993 +#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30249 +#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30505 +#define QM_REG_RLGLBLCRD_RT_SIZE 256 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30761 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30762 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30763 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30764 +#define QM_REG_RLPFINCVAL_RT_SIZE 16 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30780 +#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_RLPFCRD_RT_OFFSET 30796 +#define QM_REG_RLPFCRD_RT_SIZE 16 +#define QM_REG_RLPFENABLE_RT_OFFSET 30812 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30813 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30814 +#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30830 +#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_WFQPFCRD_RT_OFFSET 30846 +#define QM_REG_WFQPFCRD_RT_SIZE 160 +#define 
QM_REG_WFQPFENABLE_RT_OFFSET 31006 +#define QM_REG_WFQVPENABLE_RT_OFFSET 31007 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31008 +#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 +#define QM_REG_TXPQMAP_RT_OFFSET 31520 +#define QM_REG_TXPQMAP_RT_SIZE 512 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32032 +#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 +#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32544 +#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 33056 +#define QM_REG_WFQVPCRD_RT_SIZE 512 +#define QM_REG_WFQVPMAP_RT_OFFSET 33568 +#define QM_REG_WFQVPMAP_RT_SIZE 512 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34080 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 +#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET 34240 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34241 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34242 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34243 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34244 +#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34245 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34246 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34247 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34251 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34255 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34259 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34260 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34292 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34308 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34324 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34340 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34356 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34357 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34358 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34359 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34360 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34361 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34362 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34363 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34364 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34365 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34366 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34367 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34368 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34369 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34370 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34371 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34372 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34373 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34374 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34375 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34376 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34377 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34378 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34379 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34380 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34381 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34382 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34383 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34384 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 
34385 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34386 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34387 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34388 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34389 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34390 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34391 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34392 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34393 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34394 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34395 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34396 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34397 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34398 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34399 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34400 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34401 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34402 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34403 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34404 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34405 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34406 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34407 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34408 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34409 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34410 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34411 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34412 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34413 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34414 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34415 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34416 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34417 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34418 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34419 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34420 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34421 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34422 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34423 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34424 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34425 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34426 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34427 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34428 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34429 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34430 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34431 + +#define RUNTIME_ARRAY_SIZE 34432 + +/* The eth storm context for the Ystorm */ +struct ystorm_eth_conn_st_ctx { + __le32 reserved[4]; +}; + +/* The eth storm context for the Pstorm */ +struct pstorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +/* The eth storm context for the Xstorm */ +struct xstorm_eth_conn_st_ctx { + __le32 reserved[60]; +}; + +struct xstorm_eth_conn_ag_ctx { + u8 reserved0 /* cdu_validation */; + u8 eth_state /* state */; + u8 flags0; +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 
*/ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */ +#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */ +#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */ +#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */ +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */ +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */ +#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */ +#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */ +#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */ +#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */ +#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */ +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */ +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */ +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */ +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define 
XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */ +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */ +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */ +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */ +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */ +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */ +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */ +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */ +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */ +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */ +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */ +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */ +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */ +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */ +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */ +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */ +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */ +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */ +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */ +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */ +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id /* byte2 */; + __le16 physical_q0 /* physical_q0 */; + __le16 word1 /* physical_q1 */; + __le16 edpm_num_bds /* physical_q2 */; + __le16 tx_bd_cons 
/* word3 */; + __le16 tx_bd_prod /* word4 */; + __le16 go_to_bd_cons /* word5 */; + __le16 conn_dpi /* conn_dpi */; + u8 byte3 /* byte3 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + u8 byte6 /* byte6 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* cf_array0 */; + __le32 reg6 /* cf_array1 */; + __le16 word7 /* word7 */; + __le16 word8 /* word8 */; + __le16 word9 /* word9 */; + __le16 word10 /* word10 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + __le32 reg9 /* reg9 */; + u8 byte7 /* byte7 */; + u8 byte8 /* byte8 */; + u8 byte9 /* byte9 */; + u8 byte10 /* byte10 */; + u8 byte11 /* byte11 */; + u8 byte12 /* byte12 */; + u8 byte13 /* byte13 */; + u8 byte14 /* byte14 */; + u8 byte15 /* byte15 */; + u8 byte16 /* byte16 */; + __le16 word11 /* word11 */; + __le32 reg10 /* reg10 */; + __le32 reg11 /* reg11 */; + __le32 reg12 /* reg12 */; + __le32 reg13 /* reg13 */; + __le32 reg14 /* reg14 */; + __le32 reg15 /* reg15 */; + __le32 reg16 /* reg16 */; + __le32 reg17 /* reg17 */; + __le32 reg18 /* reg18 */; + __le32 reg19 /* reg19 */; + __le16 word12 /* word12 */; + __le16 word13 /* word13 */; + __le16 word14 /* word14 */; + __le16 word15 /* word15 */; +}; + +/* The eth storm context for the Tstorm */ +struct tstorm_eth_conn_st_ctx { + __le32 reserved[4]; +}; + +/* The eth storm context for the Mstorm */ +struct mstorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +/* The eth storm context for the Ustorm */ +struct ustorm_eth_conn_st_ctx { + __le32 reserved[40]; +}; + +/* eth connection context */ +struct eth_conn_context { + struct ystorm_eth_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2] /* padding */; + struct pstorm_eth_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2] /* padding */; + struct xstorm_eth_conn_st_ctx xstorm_st_context; + struct xstorm_eth_conn_ag_ctx xstorm_ag_context; + struct tstorm_eth_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2] /* padding */; + struct mstorm_eth_conn_st_ctx mstorm_st_context; + struct ustorm_eth_conn_st_ctx ustorm_st_context; +}; + +enum eth_filter_action { + ETH_FILTER_ACTION_REMOVE, + ETH_FILTER_ACTION_ADD, + ETH_FILTER_ACTION_REPLACE, + MAX_ETH_FILTER_ACTION +}; + +struct eth_filter_cmd { + u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */; + u8 vport_id /* the vport id */; + u8 action /* filter command action: add/remove/replace */; + u8 reserved0; + __le32 vni; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 vlan_id; +}; + +struct eth_filter_cmd_header { + u8 rx; + u8 tx; + u8 cmd_cnt; + u8 assert_on_error; + u8 reserved1[4]; +}; + +enum eth_filter_type { + ETH_FILTER_TYPE_MAC, + ETH_FILTER_TYPE_VLAN, + ETH_FILTER_TYPE_PAIR, + ETH_FILTER_TYPE_INNER_MAC, + ETH_FILTER_TYPE_INNER_VLAN, + ETH_FILTER_TYPE_INNER_PAIR, + ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR, + ETH_FILTER_TYPE_MAC_VNI_PAIR, + ETH_FILTER_TYPE_VNI, + MAX_ETH_FILTER_TYPE +}; + +enum eth_ramrod_cmd_id { + ETH_RAMROD_UNUSED, + ETH_RAMROD_VPORT_START /* VPort Start Ramrod */, + ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */, + ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */, + ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */, + ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */, + ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */, + ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */, + ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */, + ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update 
Ramrod */, + ETH_RAMROD_RESERVED, + ETH_RAMROD_RESERVED2, + ETH_RAMROD_RESERVED3, + ETH_RAMROD_RESERVED4, + ETH_RAMROD_RESERVED5, + ETH_RAMROD_RESERVED6, + ETH_RAMROD_RESERVED7, + ETH_RAMROD_RESERVED8, + MAX_ETH_RAMROD_CMD_ID +}; + +struct eth_vport_rss_config { + __le16 capabilities; +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3 +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5 +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6 +#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7 +#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8 +#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F +#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9 + u8 rss_id; + u8 rss_mode; + u8 update_rss_key; + u8 update_rss_ind_table; + u8 update_rss_capabilities; + u8 tbl_size; + __le32 reserved2[2]; + __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; + __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; + __le32 reserved3[2]; +}; + +enum eth_vport_rss_mode { + ETH_VPORT_RSS_MODE_DISABLED, + ETH_VPORT_RSS_MODE_REGULAR, + MAX_ETH_VPORT_RSS_MODE +}; + +struct eth_vport_rx_mode { + __le16 state; +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 +#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF +#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 + __le16 reserved2[3]; +}; + +struct eth_vport_tpa_param { + u64 reserved[2]; +}; + +struct eth_vport_tx_mode { + __le16 state; +#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0 +#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2 +#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3 +#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF +#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5 + __le16 reserved2[3]; +}; + +struct rx_queue_start_ramrod_data { + __le16 rx_queue_id; + __le16 num_of_pbl_pages; + __le16 bd_max_bytes; + __le16 sb_id; + u8 sb_index; + u8 vport_id; + u8 default_rss_queue_flg; + u8 
complete_cqe_flg; + u8 complete_event_flg; + u8 stats_counter_id; + u8 pin_context; + u8 pxp_tph_valid_bd; + u8 pxp_tph_valid_pkt; + u8 pxp_st_hint; + __le16 pxp_st_index; + u8 reserved[4]; + struct regpair cqe_pbl_addr; + struct regpair bd_base; + struct regpair sge_base; +}; + +struct rx_queue_stop_ramrod_data { + __le16 rx_queue_id; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 vport_id; + u8 reserved[3]; +}; + +struct rx_queue_update_ramrod_data { + __le16 rx_queue_id; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 init_sge_ring_flg; + u8 vport_id; + u8 pxp_tph_valid_sge; + u8 pxp_st_hint; + __le16 pxp_st_index; + u8 reserved[6]; + struct regpair sge_base; +}; + +struct tx_queue_start_ramrod_data { + __le16 sb_id; + u8 sb_index; + u8 vport_id; + u8 tc; + u8 stats_counter_id; + __le16 qm_pq_id; + u8 flags; +#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F +#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3 + u8 pin_context; + u8 pxp_tph_valid_bd; + u8 pxp_tph_valid_pkt; + __le16 pxp_st_index; + u8 pxp_st_hint; + u8 reserved1[3]; + __le16 queue_zone_id; + __le16 test_dup_count; + __le16 pbl_size; + struct regpair pbl_base_addr; +}; + +struct tx_queue_stop_ramrod_data { + __le16 reserved[4]; +}; + +struct vport_filter_update_ramrod_data { + struct eth_filter_cmd_header filter_cmd_hdr; + struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT]; +}; + +struct vport_start_ramrod_data { + u8 vport_id; + u8 sw_fid; + __le16 mtu; + u8 drop_ttl0_en; + u8 inner_vlan_removal_en; + struct eth_vport_rx_mode rx_mode; + struct eth_vport_tx_mode tx_mode; + struct eth_vport_tpa_param tpa_param; + __le16 sge_buff_size; + u8 max_sges_num; + u8 tx_switching_en; + u8 anti_spoofing_en; + u8 default_vlan_en; + u8 handle_ptp_pkts; + u8 silent_vlan_removal_en; + __le16 default_vlan; + u8 untagged; + u8 reserved[7]; +}; + +struct vport_stop_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + +struct vport_update_ramrod_data_cmn { + u8 vport_id; + u8 update_rx_active_flg; + u8 rx_active_flg; + u8 update_tx_active_flg; + u8 tx_active_flg; + u8 update_rx_mode_flg; + u8 update_tx_mode_flg; + u8 update_approx_mcast_flg; + u8 update_rss_flg; + u8 update_inner_vlan_removal_en_flg; + u8 inner_vlan_removal_en; + u8 update_tpa_param_flg; + u8 update_tpa_en_flg; + u8 update_sge_param_flg; + __le16 sge_buff_size; + u8 max_sges_num; + u8 update_tx_switching_en_flg; + u8 tx_switching_en; + u8 update_anti_spoofing_en_flg; + u8 anti_spoofing_en; + u8 update_handle_ptp_pkts; + u8 handle_ptp_pkts; + u8 update_default_vlan_en_flg; + u8 default_vlan_en; + u8 update_default_vlan_flg; + __le16 default_vlan; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + u8 silent_vlan_removal_en; + u8 reserved; +}; + +struct vport_update_ramrod_mcast { + __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; +}; + +struct vport_update_ramrod_data { + struct vport_update_ramrod_data_cmn common; + struct eth_vport_rx_mode rx_mode; + struct eth_vport_tx_mode tx_mode; + struct eth_vport_tpa_param tpa_param; + struct vport_update_ramrod_mcast approx_mcast; + struct eth_vport_rss_config rss_config; +}; + +struct mstorm_eth_conn_ag_ctx { + u8 byte0 /* 
cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */ +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ +#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ +#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ +#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0 /* word0 */; + __le16 word1 /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; +}; + +struct tstorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define 
TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */ +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 rx_bd_cons /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 rx_bd_prod /* word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; + __le32 reg10 /* reg10 */; +}; + +struct ustorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */ +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */ +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */ +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 + u8 flags2; +#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */ +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */ +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */ +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 tx_bd_cons /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le16 tx_drv_bd_cons /* word2 */; + __le16 rx_drv_cqe_cons /* word3 */; +}; + +struct xstorm_eth_hw_conn_ag_ctx { + u8 reserved0 /* cdu_validation */; + u8 eth_state /* state */; + u8 flags0; +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id /* byte2 */; + __le16 physical_q0 /* physical_q0 */; + __le16 word1 /* physical_q1 */; + __le16 edpm_num_bds /* physical_q2 */; + __le16 tx_bd_cons /* word3 */; + __le16 tx_bd_prod /* word4 */; + __le16 go_to_bd_cons /* word5 */; + __le16 conn_dpi /* conn_dpi */; +}; + +#define VF_MAX_STATIC 192 /* In case of K2 */ + +#define MCP_GLOB_PATH_MAX 2 +#define MCP_PORT_MAX 2 /* Global */ +#define MCP_GLOB_PORT_MAX 4 /* Global */ +#define MCP_GLOB_FUNC_MAX 16 /* Global */ + +typedef u32 offsize_t; /* In DWORDS !!! */ +/* Offset from the beginning of the MCP scratchpad */ +#define OFFSIZE_OFFSET_SHIFT 0 +#define OFFSIZE_OFFSET_MASK 0x0000ffff +/* Size of specific element (not the whole array if any) */ +#define OFFSIZE_SIZE_SHIFT 16 +#define OFFSIZE_SIZE_MASK 0xffff0000 + +/* SECTION_OFFSET is calculating the offset in bytes out of offsize */ +#define SECTION_OFFSET(_offsize) ((((_offsize & \ + OFFSIZE_OFFSET_MASK) >> \ + OFFSIZE_OFFSET_SHIFT) << 2)) + +/* QED_SECTION_SIZE is calculating the size in bytes out of offsize */ +#define QED_SECTION_SIZE(_offsize) (((_offsize & \ + OFFSIZE_SIZE_MASK) >> \ + OFFSIZE_SIZE_SHIFT) << 2) + +/* SECTION_ADDR returns the GRC addr of a section, given offsize and index + * within section. 
+ */ +#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \ + SECTION_OFFSET(_offsize) + \ + (QED_SECTION_SIZE(_offsize) * idx)) + +/* SECTION_OFFSIZE_ADDR returns the GRC addr to the offsize address. + * Use offsetof, since the OFFSETUP collide with the firmware definition + */ +#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base + \ + offsetof(struct \ + mcp_public_data, \ + sections[_section])) +/* PHY configuration */ +struct pmm_phy_cfg { + u32 speed; +#define PMM_SPEED_AUTONEG 0 + + u32 pause; /* bitmask */ +#define PMM_PAUSE_NONE 0x0 +#define PMM_PAUSE_AUTONEG 0x1 +#define PMM_PAUSE_RX 0x2 +#define PMM_PAUSE_TX 0x4 + + u32 adv_speed; /* Default should be the speed_cap_mask */ + u32 loopback_mode; +#define PMM_LOOPBACK_NONE 0 +#define PMM_LOOPBACK_INT_PHY 1 +#define PMM_LOOPBACK_EXT_PHY 2 +#define PMM_LOOPBACK_EXT 3 +#define PMM_LOOPBACK_MAC 4 + + /* features */ + u32 feature_config_flags; +}; + +struct port_mf_cfg { + u32 dynamic_cfg; /* device control channel */ +#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff +#define PORT_MF_CFG_OV_TAG_SHIFT 0 +#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK + + u32 reserved[1]; +}; + +/* DO NOT add new fields in the middle + * MUST be synced with struct pmm_stats_map + */ +struct pmm_stats { + u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/ + u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/ + u64 r255; + u64 r511; + u64 r1023; + u64 r1518; + u64 r1522; + u64 r2047; + u64 r4095; + u64 r9216; + u64 r16383; + u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/ + u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/ + u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/ + u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/ + u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/ + u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */ + u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/ + u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */ + u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */ + u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */ + u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */ + u64 t127; + u64 t255; + u64 t511; + u64 t1023; + u64 t1518; + u64 t2047; + u64 t4095; + u64 t9216; + u64 t16383; + u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */ + u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */ + u64 tlpiec; + u64 tncl; + u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */ + u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */ + u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */ + u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */ + u64 rxpok; + u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */ + u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */ + u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */ + u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */ + u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */ +}; + +struct brb_stats { + u64 brb_truncate[8]; + u64 brb_discard[8]; +}; + +struct port_stats { + struct brb_stats brb; + struct pmm_stats pmm; +}; + +#define CMT_TEAM0 0 +#define CMT_TEAM1 1 +#define CMT_TEAM_MAX 2 + +struct couple_mode_teaming { + u8 port_cmt[MCP_GLOB_PORT_MAX]; +#define PORT_CMT_IN_TEAM BIT(0) + +#define PORT_CMT_PORT_ROLE BIT(1) +#define PORT_CMT_PORT_INACTIVE (0 << 1) +#define PORT_CMT_PORT_ACTIVE BIT(1) + +#define PORT_CMT_TEAM_MASK BIT(2) +#define PORT_CMT_TEAM0 (0 << 2) +#define PORT_CMT_TEAM1 BIT(2) +}; + 
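The offsize_t encoding introduced above packs two quantities into one u32: the lower 16 bits hold a section's offset from the start of the MCP scratchpad, in dwords, and the upper 16 bits hold the size of a single element of that section, also in dwords. SECTION_OFFSET and QED_SECTION_SIZE convert those fields to bytes, and SECTION_ADDR adds them to the scratchpad base to reach element idx of the section. The following stand-alone C sketch shows that arithmetic; the MCP_REG_SCRATCH value and the sample offsize word are illustrative assumptions, not values taken from this patch, and the helper section_addr() is hypothetical (the macro bodies themselves are copied from the header above).

/* Minimal sketch of offsize decoding, under the assumptions stated above. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define OFFSIZE_OFFSET_SHIFT	0
#define OFFSIZE_OFFSET_MASK	0x0000ffff
#define OFFSIZE_SIZE_SHIFT	16
#define OFFSIZE_SIZE_MASK	0xffff0000

/* Both fields are stored in dwords, hence the "<< 2" to convert to bytes. */
#define SECTION_OFFSET(_offsize) \
	((((_offsize) & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2)
#define QED_SECTION_SIZE(_offsize) \
	((((_offsize) & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)

#define MCP_REG_SCRATCH		0xe20000	/* assumed scratchpad base */

/* GRC byte address of element @idx of a section described by @offsize. */
static u32 section_addr(u32 offsize, u32 idx)
{
	return MCP_REG_SCRATCH + SECTION_OFFSET(offsize) +
	       QED_SECTION_SIZE(offsize) * idx;
}

int main(void)
{
	/* element size 0x40 dwords, section offset 0x123 dwords (made up) */
	u32 offsize = (0x40 << OFFSIZE_SIZE_SHIFT) | 0x0123;

	printf("elem 0 @ 0x%x, elem 2 @ 0x%x\n",
	       section_addr(offsize, 0), section_addr(offsize, 2));
	return 0;
}

With the sample word, element 0 resolves to the scratchpad base plus 0x48c bytes and element 2 lands two element sizes (0x200 bytes) further on; this is the same computation SECTION_ADDR performs when the driver locates an entry found through SECTION_OFFSIZE_ADDR.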
+/************************************** +* LLDP and DCBX HSI structures +**************************************/ +#define LLDP_CHASSIS_ID_STAT_LEN 4 +#define LLDP_PORT_ID_STAT_LEN 4 +#define DCBX_MAX_APP_PROTOCOL 32 +#define MAX_SYSTEM_LLDP_TLV_DATA 32 + +enum lldp_agent_e { + LLDP_NEAREST_BRIDGE = 0, + LLDP_NEAREST_NON_TPMR_BRIDGE, + LLDP_NEAREST_CUSTOMER_BRIDGE, + LLDP_MAX_LLDP_AGENTS +}; + +struct lldp_config_params_s { + u32 config; +#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff +#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 +#define LLDP_CONFIG_HOLD_MASK 0x00000f00 +#define LLDP_CONFIG_HOLD_SHIFT 8 +#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 +#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 +#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 +#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 +#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 +#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 + u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; +}; + +struct lldp_status_params_s { + u32 prefix_seq_num; + u32 status; /* TBD */ + + /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ + u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + + /* Holds remote Port ID TLV header, subtype and 9B of payload. */ + u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; + u32 suffix_seq_num; +}; + +struct dcbx_ets_feature { + u32 flags; +#define DCBX_ETS_ENABLED_MASK 0x00000001 +#define DCBX_ETS_ENABLED_SHIFT 0 +#define DCBX_ETS_WILLING_MASK 0x00000002 +#define DCBX_ETS_WILLING_SHIFT 1 +#define DCBX_ETS_ERROR_MASK 0x00000004 +#define DCBX_ETS_ERROR_SHIFT 2 +#define DCBX_ETS_CBS_MASK 0x00000008 +#define DCBX_ETS_CBS_SHIFT 3 +#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 +#define DCBX_ETS_MAX_TCS_SHIFT 4 + u32 pri_tc_tbl[1]; +#define DCBX_ISCSI_OOO_TC 4 +#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1) + u32 tc_bw_tbl[2]; + u32 tc_tsa_tbl[2]; +#define DCBX_ETS_TSA_STRICT 0 +#define DCBX_ETS_TSA_CBS 1 +#define DCBX_ETS_TSA_ETS 2 +}; + +struct dcbx_app_priority_entry { + u32 entry; +#define DCBX_APP_PRI_MAP_MASK 0x000000ff +#define DCBX_APP_PRI_MAP_SHIFT 0 +#define DCBX_APP_PRI_0 0x01 +#define DCBX_APP_PRI_1 0x02 +#define DCBX_APP_PRI_2 0x04 +#define DCBX_APP_PRI_3 0x08 +#define DCBX_APP_PRI_4 0x10 +#define DCBX_APP_PRI_5 0x20 +#define DCBX_APP_PRI_6 0x40 +#define DCBX_APP_PRI_7 0x80 +#define DCBX_APP_SF_MASK 0x00000300 +#define DCBX_APP_SF_SHIFT 8 +#define DCBX_APP_SF_ETHTYPE 0 +#define DCBX_APP_SF_PORT 1 +#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 +#define DCBX_APP_PROTOCOL_ID_SHIFT 16 +}; + +/* FW structure in BE */ +struct dcbx_app_priority_feature { + u32 flags; +#define DCBX_APP_ENABLED_MASK 0x00000001 +#define DCBX_APP_ENABLED_SHIFT 0 +#define DCBX_APP_WILLING_MASK 0x00000002 +#define DCBX_APP_WILLING_SHIFT 1 +#define DCBX_APP_ERROR_MASK 0x00000004 +#define DCBX_APP_ERROR_SHIFT 2 +/* Not in use + * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00 + * #define DCBX_APP_DEFAULT_PRI_SHIFT 8 + */ +#define DCBX_APP_MAX_TCS_MASK 0x0000f000 +#define DCBX_APP_MAX_TCS_SHIFT 12 +#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 +#define DCBX_APP_NUM_ENTRIES_SHIFT 16 + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +/* FW structure in BE */ +struct dcbx_features { + /* PG feature */ + struct dcbx_ets_feature ets; + + /* PFC feature */ + u32 pfc; +#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff +#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 +#define 
DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 + +#define DCBX_PFC_FLAGS_MASK 0x0000ff00 +#define DCBX_PFC_FLAGS_SHIFT 8 +#define DCBX_PFC_CAPS_MASK 0x00000f00 +#define DCBX_PFC_CAPS_SHIFT 8 +#define DCBX_PFC_MBC_MASK 0x00004000 +#define DCBX_PFC_MBC_SHIFT 14 +#define DCBX_PFC_WILLING_MASK 0x00008000 +#define DCBX_PFC_WILLING_SHIFT 15 +#define DCBX_PFC_ENABLED_MASK 0x00010000 +#define DCBX_PFC_ENABLED_SHIFT 16 +#define DCBX_PFC_ERROR_MASK 0x00020000 +#define DCBX_PFC_ERROR_SHIFT 17 + + /* APP feature */ + struct dcbx_app_priority_feature app; +}; + +struct dcbx_local_params { + u32 config; +#define DCBX_CONFIG_VERSION_MASK 0x00000003 +#define DCBX_CONFIG_VERSION_SHIFT 0 +#define DCBX_CONFIG_VERSION_DISABLED 0 +#define DCBX_CONFIG_VERSION_IEEE 1 +#define DCBX_CONFIG_VERSION_CEE 2 + + u32 flags; + struct dcbx_features features; +}; + +struct dcbx_mib { + u32 prefix_seq_num; + u32 flags; + struct dcbx_features features; + u32 suffix_seq_num; +}; + +struct lldp_system_tlvs_buffer_s { + u16 valid; + u16 length; + u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; +}; + +/**************************************/ +/* */ +/* P U B L I C G L O B A L */ +/* */ +/**************************************/ +struct public_global { + u32 max_path; +#define MAX_PATH_BIG_BEAR 2 +#define MAX_PATH_K2 1 + u32 max_ports; +#define MODE_1P 1 +#define MODE_2P 2 +#define MODE_3P 3 +#define MODE_4P 4 + u32 debug_mb_offset; + u32 phymod_dbg_mb_offset; + struct couple_mode_teaming cmt; + s32 internal_temperature; + u32 mfw_ver; + u32 running_bundle_id; +}; + +/**************************************/ +/* */ +/* P U B L I C P A T H */ +/* */ +/**************************************/ + +/**************************************************************************** +* Shared Memory 2 Region * +****************************************************************************/ +/* The fw_flr_ack is actually built in the following way: */ +/* 8 bit: PF ack */ +/* 128 bit: VF ack */ +/* 8 bit: ios_dis_ack */ +/* In order to maintain endianity in the mailbox hsi, we want to keep using */ +/* u32. The fw must have the VF right after the PF since this is how it */ +/* access arrays(it expects always the VF to reside after the PF, and that */ +/* makes the calculation much easier for it. 
) */ +/* In order to answer both limitations, and keep the struct small, the code */ +/* will abuse the structure defined here to achieve the actual partition */ +/* above */ +/****************************************************************************/ +struct fw_flr_mb { + u32 aggint; + u32 opgen_addr; + u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */ +#define ACCUM_ACK_PF_BASE 0 +#define ACCUM_ACK_PF_SHIFT 0 + +#define ACCUM_ACK_VF_BASE 8 +#define ACCUM_ACK_VF_SHIFT 3 + +#define ACCUM_ACK_IOV_DIS_BASE 256 +#define ACCUM_ACK_IOV_DIS_SHIFT 8 +}; + +struct public_path { + struct fw_flr_mb flr_mb; + u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; + + u32 process_kill; +#define PROCESS_KILL_COUNTER_MASK 0x0000ffff +#define PROCESS_KILL_COUNTER_SHIFT 0 +#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 +#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 +#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit) +}; + +/**************************************/ +/* */ +/* P U B L I C P O R T */ +/* */ +/**************************************/ + +/**************************************************************************** +* Driver <-> FW Mailbox * +****************************************************************************/ + +struct public_port { + u32 validity_map; /* 0x0 (4*2 = 0x8) */ + + /* validity bits */ +#define MCP_VALIDITY_PCI_CFG 0x00100000 +#define MCP_VALIDITY_MB 0x00200000 +#define MCP_VALIDITY_DEV_INFO 0x00400000 +#define MCP_VALIDITY_RESERVED 0x00000007 + + /* One licensing bit should be set */ +#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 +#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 +#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 +#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 + + /* Active MFW */ +#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 +#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 +#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040 +#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 + + u32 link_status; +#define LINK_STATUS_LINK_UP \ + 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) + +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 + +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 + +#define LINK_STATUS_PFC_ENABLED \ + 0x00000100 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 +#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 +#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 +#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 +#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 +#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 + +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 +#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) +#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18) +#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) +#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) + +#define LINK_STATUS_SFP_TX_FAULT \ + 
0x00100000 +#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 +#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 + + u32 link_status1; + u32 ext_phy_fw_version; + u32 drv_phy_cfg_addr; + + u32 port_stx; + + u32 stat_nig_timer; + + struct port_mf_cfg port_mf_config; + struct port_stats stats; + + u32 media_type; +#define MEDIA_UNSPECIFIED 0x0 +#define MEDIA_SFPP_10G_FIBER 0x1 +#define MEDIA_XFP_FIBER 0x2 +#define MEDIA_DA_TWINAX 0x3 +#define MEDIA_BASE_T 0x4 +#define MEDIA_SFP_1G_FIBER 0x5 +#define MEDIA_KR 0xf0 +#define MEDIA_NOT_PRESENT 0xff + + u32 lfa_status; +#define LFA_LINK_FLAP_REASON_OFFSET 0 +#define LFA_LINK_FLAP_REASON_MASK 0x000000ff +#define LFA_NO_REASON (0 << 0) +#define LFA_LINK_DOWN BIT(0) +#define LFA_FORCE_INIT BIT(1) +#define LFA_LOOPBACK_MISMATCH BIT(2) +#define LFA_SPEED_MISMATCH BIT(3) +#define LFA_FLOW_CTRL_MISMATCH BIT(4) +#define LFA_ADV_SPEED_MISMATCH BIT(5) +#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 +#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 +#define LINK_FLAP_COUNT_OFFSET 16 +#define LINK_FLAP_COUNT_MASK 0x00ff0000 + + u32 link_change_count; + + /* LLDP params */ + struct lldp_config_params_s lldp_config_params[ + LLDP_MAX_LLDP_AGENTS]; + struct lldp_status_params_s lldp_status_params[ + LLDP_MAX_LLDP_AGENTS]; + struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; + + /* DCBX related MIB */ + struct dcbx_local_params local_admin_dcbx_mib; + struct dcbx_mib remote_dcbx_mib; + struct dcbx_mib operational_dcbx_mib; +}; + +/**************************************/ +/* */ +/* P U B L I C F U N C */ +/* */ +/**************************************/ + +struct public_func { + u32 iscsi_boot_signature; + u32 iscsi_boot_block_offset; + + u32 reserved[8]; + + u32 config; + + /* E/R/I/D */ + /* function 0 of each port cannot be hidden */ +#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 + +#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 +#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 +#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 +#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 +#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 +#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 +#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 + + /* MINBW, MAXBW */ + /* value range - 0..100, increments in 1 % */ +#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 +#define FUNC_MF_CFG_MIN_BW_SHIFT 8 +#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 +#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 +#define FUNC_MF_CFG_MAX_BW_SHIFT 16 +#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 + + u32 status; +#define FUNC_STATUS_VLINK_DOWN 0x00000001 + + u32 mac_upper; /* MAC */ +#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff +#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 +#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; +#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 ovlan_stag; /* tags */ +#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff +#define FUNC_MF_CFG_OV_STAG_SHIFT 0 +#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK + + u32 pf_allocation; /* vf per pf */ + + u32 preserve_data; /* Will be used bt CCM */ + + u32 driver_last_activity_ts; + + u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */ + + u32 drv_id; +#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff +#define DRV_ID_PDA_COMP_VER_SHIFT 0 + +#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 +#define 
DRV_ID_MCP_HSI_VER_SHIFT 16 +#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT) + +#define DRV_ID_DRV_TYPE_MASK 0xff000000 +#define DRV_ID_DRV_TYPE_SHIFT 24 +#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT) +}; + +/**************************************/ +/* */ +/* P U B L I C M B */ +/* */ +/**************************************/ +/* This is the only section that the driver can write to, and each */ +/* Basically each driver request to set feature parameters, + * will be done using a different command, which will be linked + * to a specific data structure from the union below. + * For huge strucuture, the common blank structure should be used. + */ + +struct mcp_mac { + u32 mac_upper; /* Upper 16 bits are always zeroes */ + u32 mac_lower; +}; + +struct mcp_val64 { + u32 lo; + u32 hi; +}; + +struct mcp_file_att { + u32 nvm_start_addr; + u32 len; +}; + +#define MCP_DRV_VER_STR_SIZE 16 +#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) +#define MCP_DRV_NVM_BUF_LEN 32 +struct drv_version_stc { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +union drv_union_data { + u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; + struct mcp_mac wol_mac; + + struct pmm_phy_cfg drv_phy_cfg; + + struct mcp_val64 val64; /* For PHY / AVS commands */ + + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + + struct mcp_file_att file_att; + + u32 ack_vf_disabled[VF_MAX_STATIC / 32]; + + struct drv_version_stc drv_version; +}; + +struct public_drv_mb { + u32 drv_mb_header; +#define DRV_MSG_CODE_MASK 0xffff0000 +#define DRV_MSG_CODE_LOAD_REQ 0x10000000 +#define DRV_MSG_CODE_LOAD_DONE 0x11000000 +#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 +#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 +#define DRV_MSG_CODE_INIT_PHY 0x22000000 + /* Params - FORCE - Reinitialize the link regardless of LFA */ + /* - DONT_CARE - Don't flap the link if up */ +#define DRV_MSG_CODE_LINK_RESET 0x23000000 + +#define DRV_MSG_CODE_SET_LLDP 0x24000000 +#define DRV_MSG_CODE_SET_DCBX 0x25000000 + +#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 + +#define DRV_MSG_CODE_INITIATE_FLR 0x02000000 +#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 +#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 +#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000 +#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000 +#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 +#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000 +#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000 +#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000 +#define DRV_MSG_CODE_MCP_RESET 0x00090000 +#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000 +#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000 +#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000 +#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000 +#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000 +#define DRV_MSG_CODE_SET_VERSION 0x000f0000 + +#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 drv_mb_param; + + /* UNLOAD_REQ params */ +#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 +#define 
DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 + + /* UNLOAD_DONE_params */ +#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001 + + /* INIT_PHY params */ +#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001 +#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002 + + /* LLDP / DCBX params*/ +#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 +#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006 +#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1 +#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008 +#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 + +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0 + +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2 + +#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0 +#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF +#define DRV_MB_PARAM_NVM_LEN_SHIFT 24 +#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 + +#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0 +#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF +#define DRV_MB_PARAM_PHY_LANE_SHIFT 16 +#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000 +#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29 +#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000 +#define DRV_MB_PARAM_PHY_PORT_SHIFT 30 +#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000 + +/* configure vf MSIX params*/ +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 + + u32 fw_mb_header; +#define FW_MSG_CODE_MASK 0xffff0000 +#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 +#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 +#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 +#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 +#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 +#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 +#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 +#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 +#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000 +#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000 +#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000 +#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000 +#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000 +#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000 +#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000 +#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 +#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 +#define FW_MSG_CODE_FLR_ACK 0x02000000 +#define FW_MSG_CODE_FLR_NACK 0x02100000 + +#define FW_MSG_CODE_NVM_OK 0x00010000 +#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000 +#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000 +#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000 +#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000 +#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000 +#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000 +#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000 +#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000 +#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000 +#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000 +#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000 +#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000 +#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000 +#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000 +#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000 
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000 +#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000 +#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000 +#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000 +#define FW_MSG_CODE_PHY_OK 0x00110000 +#define FW_MSG_CODE_PHY_ERROR 0x00120000 +#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000 +#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000 +#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000 + +#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 fw_mb_param; + + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + u32 mcp_pulse_mb; +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + + union drv_union_data union_data; +}; + +/* MFW - DRV MB */ +/********************************************************************** +* Description +* Incremental Aggregative +* 8-bit MFW counter per message +* 8-bit ack-counter per message +* Capabilities +* Provides up to 256 aggregative message per type +* Provides 4 message types in dword +* Message type pointers to byte offset +* Backward Compatibility by using sizeof for the counters. +* No lock requires for 32bit messages +* Limitations: +* In case of messages greater than 32bit, a dedicated mechanism(e.g lock) +* is required to prevent data corruption. +**********************************************************************/ +enum MFW_DRV_MSG_TYPE { + MFW_DRV_MSG_LINK_CHANGE, + MFW_DRV_MSG_FLR_FW_ACK_FAILED, + MFW_DRV_MSG_VF_DISABLED, + MFW_DRV_MSG_LLDP_DATA_UPDATED, + MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, + MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, + MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_MAX +}; + +#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1) +#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2) +#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3) +#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) + +struct public_mfw_mb { + u32 sup_msgs; + u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; + u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; +}; + +/**************************************/ +/* */ +/* P U B L I C D A T A */ +/* */ +/**************************************/ +enum public_sections { + PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */ + PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */ + PUBLIC_GLOBAL, + PUBLIC_PATH, + PUBLIC_PORT, + PUBLIC_FUNC, + PUBLIC_MAX_SECTIONS +}; + +struct drv_ver_info_stc { + u32 ver; + u8 name[32]; +}; + +struct mcp_public_data { + /* The sections fields is an array */ + u32 num_sections; + offsize_t sections[PUBLIC_MAX_SECTIONS]; + struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; + struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; + struct public_global global; + struct public_path path[MCP_GLOB_PATH_MAX]; + struct public_port port[MCP_GLOB_PORT_MAX]; + struct public_func func[MCP_GLOB_FUNC_MAX]; + struct drv_ver_info_stc drv_info; +}; + +struct nvm_cfg_mac_address { + u32 mac_addr_hi; +#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF +#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 + + u32 mac_addr_lo; +}; + +/****************************************** +* nvm_cfg1 structs +******************************************/ + +struct nvm_cfg1_glob { + u32 generic_cont0; /* 0x0 */ +#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F +#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0 +#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0 +#define 
NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1 +#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2 +#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3 +#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0 +#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 +#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 +#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF 0x1 +#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 +#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 +#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 +#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 +#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000 +#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12 +#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0 +#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1 +#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000 +#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13 +#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000 +#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21 +#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000 +#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29 +#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0 +#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1 +#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000 +#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30 +#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0 +#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1 +#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000 +#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31 +#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0 +#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1 + + u32 engineering_change[3]; /* 0x4 */ + + u32 manufacturing_id; /* 0x10 */ + + u32 serial_number[4]; /* 0x14 */ + + u32 pcie_cfg; /* 0x24 */ +#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003 +#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0 +#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0 +#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1 +#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2 +#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004 +#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2 +#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0 +#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1 +#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018 +#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3 +#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0 +#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1 +#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2 +#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3 +#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020 +#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5 +#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0 +#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1 +#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0 +#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3 +#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000 +#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13 +#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000 +#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21 +#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000 +#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29 + + u32 mgmt_traffic; /* 0x28 */ +#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001 +#define 
NVM_CFG1_GLOB_RESERVED60_OFFSET 0 +#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0 +#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1 +#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE +#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1 +#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00 +#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9 +#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000 +#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17 +#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000 +#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25 +#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0 +#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1 +#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2 + + u32 core_cfg; /* 0x2C */ +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD +#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100 +#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8 +#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0 +#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1 +#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200 +#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9 +#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0 +#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1 +#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00 +#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10 +#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000 +#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18 +#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000 +#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26 +#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0 +#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1 +#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3 +#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000 +#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29 +#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0 +#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1 + + u32 e_lane_cfg1; /* 0x30 */ +#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F +#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0 +#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0 +#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4 +#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00 +#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8 +#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000 +#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12 +#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000 +#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16 +#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000 +#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20 +#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000 +#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24 +#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000 +#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28 + + u32 e_lane_cfg2; /* 0x34 */ +#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001 +#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0 +#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002 +#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1 +#define 
NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004 +#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2 +#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008 +#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3 +#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010 +#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4 +#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020 +#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5 +#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040 +#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6 +#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080 +#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7 +#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00 +#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8 +#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0 +#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1 +#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2 +#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000 +#define NVM_CFG1_GLOB_NCSI_OFFSET 12 +#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0 +#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1 + + u32 f_lane_cfg1; /* 0x38 */ +#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F +#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0 +#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0 +#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4 +#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00 +#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8 +#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000 +#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12 +#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000 +#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16 +#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000 +#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20 +#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000 +#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24 +#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000 +#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28 + + u32 f_lane_cfg2; /* 0x3C */ +#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001 +#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0 +#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002 +#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1 +#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004 +#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2 +#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008 +#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3 +#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010 +#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4 +#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020 +#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5 +#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040 +#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6 +#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080 +#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7 + + u32 eagle_preemphasis; /* 0x40 */ +#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24 + + u32 eagle_driver_current; /* 0x44 */ +#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000 +#define 
NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24 + + u32 falcon_preemphasis; /* 0x48 */ +#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24 + + u32 falcon_driver_current; /* 0x4C */ +#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24 + + u32 pci_id; /* 0x50 */ +#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF +#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0 + + u32 pci_subsys_id; /* 0x54 */ +#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF +#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0 +#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000 +#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16 + + u32 bar; /* 0x58 */ +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF +#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00 +#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8 +#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0 +#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1 +#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2 +#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3 +#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4 +#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5 +#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6 +#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7 +#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8 +#define 
NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9 +#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA +#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB +#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC +#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD +#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE +#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF + + u32 eagle_txfir_main; /* 0x5C */ +#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24 + + u32 eagle_txfir_post; /* 0x60 */ +#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24 + + u32 falcon_txfir_main; /* 0x64 */ +#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24 + + u32 falcon_txfir_post; /* 0x68 */ +#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24 + + u32 manufacture_ver; /* 0x6C */ +#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F +#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0 +#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0 +#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6 +#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000 +#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12 +#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000 +#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18 +#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000 +#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24 + + u32 manufacture_time; /* 0x70 */ +#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F +#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0 +#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0 +#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6 +#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000 +#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12 + + u32 led_global_settings; /* 0x74 */ +#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F +#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0 +#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0 +#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4 +#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00 +#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8 +#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000 +#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12 + + u32 generic_cont1; /* 0x78 */ +#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF +#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0 + + u32 mbi_version; /* 0x7C */ +#define 
NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF +#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 +#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 +#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 + + u32 mbi_date; /* 0x80 */ + + u32 misc_sig; /* 0x84 */ + + /* Define the GPIO mapping to switch i2c mux */ +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20 + + u32 reserved[46]; /* 0x88 */ +}; + +struct nvm_cfg1_path { + u32 reserved[30]; /* 0x0 */ +}; + +struct nvm_cfg1_port { + u32 power_dissipated; /* 0x0 */ +#define NVM_CFG1_PORT_POWER_DIS_D0_MASK 0x000000FF +#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET 0 +#define NVM_CFG1_PORT_POWER_DIS_D1_MASK 0x0000FF00 +#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET 8 +#define NVM_CFG1_PORT_POWER_DIS_D2_MASK 0x00FF0000 +#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET 16 +#define NVM_CFG1_PORT_POWER_DIS_D3_MASK 0xFF000000 +#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET 24 + + u32 power_consumed; /* 0x4 */ +#define NVM_CFG1_PORT_POWER_CONS_D0_MASK 0x000000FF +#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET 0 +#define NVM_CFG1_PORT_POWER_CONS_D1_MASK 0x0000FF00 +#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET 8 +#define NVM_CFG1_PORT_POWER_CONS_D2_MASK 0x00FF0000 +#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET 16 +#define NVM_CFG1_PORT_POWER_CONS_D3_MASK 0xFF000000 +#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET 24 + + u32 generic_cont0; /* 0x8 */ +#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF +#define NVM_CFG1_PORT_LED_MODE_OFFSET 0 +#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0 +#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1 +#define 
NVM_CFG1_PORT_LED_MODE_PHY2 0x2 +#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3 +#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4 +#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5 +#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6 +#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7 +#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8 +#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9 +#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA +#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB +#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC +#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD +#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE +#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF +#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00 +#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8 +#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000 +#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 +#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 +#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 +#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 + + u32 pcie_cfg; /* 0xC */ +#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007 +#define NVM_CFG1_PORT_RESERVED15_OFFSET 0 + + u32 features; /* 0x10 */ +#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001 +#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0 +#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0 +#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1 +#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002 +#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1 +#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0 +#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1 + + u32 speed_cap_mask; /* 0x14 */ +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40 + + u32 link_settings; /* 0x18 */ +#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F +#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4 +#define 
NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7 +#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800 +#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11 +#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1 +#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2 +#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4 +#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000 +#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14 +#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0 +#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1 + + u32 phy_cfg; /* 0x1C */ +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0 +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1 +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2 +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4 +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8 +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0xD +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0xE +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0xF +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x10 +#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000 +#define NVM_CFG1_PORT_AN_MODE_OFFSET 24 +#define NVM_CFG1_PORT_AN_MODE_NONE 0x0 +#define NVM_CFG1_PORT_AN_MODE_CL73 0x1 +#define NVM_CFG1_PORT_AN_MODE_CL37 0x2 +#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3 +#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4 +#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5 +#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6 + + u32 mgmt_traffic; /* 0x20 */ +#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F +#define NVM_CFG1_PORT_RESERVED61_OFFSET 0 +#define NVM_CFG1_PORT_RESERVED61_DISABLED 0x0 +#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII 0x1 +#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS 0x2 + + u32 ext_phy; /* 0x24 */ +#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF +#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0 +#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0 +#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1 +#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00 +#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8 + + u32 mba_cfg1; /* 0x28 */ +#define NVM_CFG1_PORT_MBA_MASK 0x00000001 +#define NVM_CFG1_PORT_MBA_OFFSET 0 +#define NVM_CFG1_PORT_MBA_DISABLED 0x0 +#define NVM_CFG1_PORT_MBA_ENABLED 0x1 +#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK 0x00000006 +#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET 1 +#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO 0x0 +#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS 0x1 +#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H 0x2 +#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H 0x3 +#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078 +#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3 +#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080 +#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7 +#define 
NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0 +#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1 +#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100 +#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8 +#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0 +#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1 +#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00 +#define NVM_CFG1_PORT_RESERVED5_OFFSET 9 +#define NVM_CFG1_PORT_RESERVED5_DISABLED 0x0 +#define NVM_CFG1_PORT_RESERVED5_2K 0x1 +#define NVM_CFG1_PORT_RESERVED5_4K 0x2 +#define NVM_CFG1_PORT_RESERVED5_8K 0x3 +#define NVM_CFG1_PORT_RESERVED5_16K 0x4 +#define NVM_CFG1_PORT_RESERVED5_32K 0x5 +#define NVM_CFG1_PORT_RESERVED5_64K 0x6 +#define NVM_CFG1_PORT_RESERVED5_128K 0x7 +#define NVM_CFG1_PORT_RESERVED5_256K 0x8 +#define NVM_CFG1_PORT_RESERVED5_512K 0x9 +#define NVM_CFG1_PORT_RESERVED5_1M 0xA +#define NVM_CFG1_PORT_RESERVED5_2M 0xB +#define NVM_CFG1_PORT_RESERVED5_4M 0xC +#define NVM_CFG1_PORT_RESERVED5_8M 0xD +#define NVM_CFG1_PORT_RESERVED5_16M 0xE +#define NVM_CFG1_PORT_RESERVED5_32M 0xF +#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK 0x001E0000 +#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET 17 +#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G 0x7 +#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000 +#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET 21 + + u32 mba_cfg2; /* 0x2C */ +#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK 0x0000FFFF +#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET 0 +#define NVM_CFG1_PORT_MBA_VLAN_MASK 0x00010000 +#define NVM_CFG1_PORT_MBA_VLAN_OFFSET 16 + + u32 vf_cfg; /* 0x30 */ +#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF +#define NVM_CFG1_PORT_RESERVED8_OFFSET 0 +#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000 +#define NVM_CFG1_PORT_RESERVED6_OFFSET 16 +#define NVM_CFG1_PORT_RESERVED6_DISABLED 0x0 +#define NVM_CFG1_PORT_RESERVED6_4K 0x1 +#define NVM_CFG1_PORT_RESERVED6_8K 0x2 +#define NVM_CFG1_PORT_RESERVED6_16K 0x3 +#define NVM_CFG1_PORT_RESERVED6_32K 0x4 +#define NVM_CFG1_PORT_RESERVED6_64K 0x5 +#define NVM_CFG1_PORT_RESERVED6_128K 0x6 +#define NVM_CFG1_PORT_RESERVED6_256K 0x7 +#define NVM_CFG1_PORT_RESERVED6_512K 0x8 +#define NVM_CFG1_PORT_RESERVED6_1M 0x9 +#define NVM_CFG1_PORT_RESERVED6_2M 0xA +#define NVM_CFG1_PORT_RESERVED6_4M 0xB +#define NVM_CFG1_PORT_RESERVED6_8M 0xC +#define NVM_CFG1_PORT_RESERVED6_16M 0xD +#define NVM_CFG1_PORT_RESERVED6_32M 0xE +#define NVM_CFG1_PORT_RESERVED6_64M 0xF + + struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */ + + u32 led_port_settings; /* 0x3C */ +#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF +#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0 +#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00 +#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8 +#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000 +#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40 + + u32 transceiver_00; /* 0x40 */ + + /* Define for mapping of transceiver signal module absent */ +#define 
NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20 + /* Define the GPIO mux settings to switch i2c mux to this port */ +#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00 +#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8 +#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000 +#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12 + + u32 reserved[133]; /* 0x44 */ +}; + +struct nvm_cfg1_func { + struct nvm_cfg_mac_address mac_address; /* 0x0 */ + + u32 rsrv1; /* 0x8 */ +#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF +#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0 +#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000 +#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16 + + u32 rsrv2; /* 0xC */ +#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF +#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0 +#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000 +#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16 + + u32 device_id; /* 0x10 */ +#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF +#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0 +#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK 0xFFFF0000 +#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET 16 + + u32 cmn_cfg; /* 0x14 */ +#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK 0x00000007 +#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET 0 +#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE 0x0 +#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL 0x1 +#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP 0x2 +#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT 0x3 +#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT 0x4 +#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE 0x7 +#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8 +#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3 +#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000 +#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19 +#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0 +#define 
NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1 +#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2 +#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3 +#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000 +#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23 +#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000 +#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31 +#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0 +#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1 + + u32 pci_cfg; /* 0x18 */ +#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F +#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0 +#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80 +#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7 +#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000 +#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14 +#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0 +#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1 +#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2 +#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3 +#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4 +#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5 +#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6 +#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7 +#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8 +#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9 +#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA +#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB +#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC +#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD +#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE +#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF +#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000 +#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18 + + struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */ + + struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */ + + u32 reserved[9]; /* 0x2C */ +}; + +struct nvm_cfg1 { + struct nvm_cfg1_glob glob; /* 0x0 */ + + struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */ + + struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */ + + struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */ +}; + +/****************************************** +* nvm_cfg structs +******************************************/ + +enum nvm_cfg_sections { + NVM_CFG_SECTION_NVM_CFG1, + NVM_CFG_SECTION_MAX +}; + +struct nvm_cfg { + u32 num_sections; + u32 sections_offset[NVM_CFG_SECTION_MAX]; + struct nvm_cfg1 cfg1; +}; + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_2 2 +#define PORT_3 3 + +extern struct spad_layout g_spad; + +#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */ + +#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE)) + +#define TO_OFFSIZE(_offset, _size) \ + (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \ + (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT)) + +enum spad_sections { + SPAD_SECTION_TRACE, + SPAD_SECTION_NVM_CFG, + SPAD_SECTION_PUBLIC, + SPAD_SECTION_PRIVATE, + SPAD_SECTION_MAX +}; + +struct spad_layout { + struct nvm_cfg nvm_cfg; + struct mcp_public_data public_data; +}; + +#define CRC_MAGIC_VALUE 0xDEBB20E3 +#define CRC32_POLYNOMIAL 0xEDB88320 +#define NVM_CRC_SIZE (sizeof(u32)) + +enum nvm_sw_arbitrator { + NVM_SW_ARB_HOST, + NVM_SW_ARB_MCP, + NVM_SW_ARB_UART, + NVM_SW_ARB_RESERVED +}; + +/**************************************************************************** +* Boot Strap Region * +****************************************************************************/ +struct legacy_bootstrap_region { + u32 magic_value; +#define NVM_MAGIC_VALUE 0x669955aa + u32 sram_start_addr; + u32 code_len; /* boot code length (in dwords) */ + u32 code_start_addr; + u32 crc; /* 32-bit CRC */ +}; + 
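Nearly every dword in the nvm_cfg1 structures above is carved into sub-fields described by paired *_MASK / *_OFFSET defines. The short standalone sketch below (not part of the patch; get_nvm_field() is a hypothetical helper added only for illustration) shows the intended extraction idiom, using the NVM_CFG1_GLOB_MF_MODE field from nvm_cfg1_glob.generic_cont0 as an example; the mask, offset and NPAR1_0 values are copied from the definitions above.

/* Standalone illustration of the *_MASK / *_OFFSET convention; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define NVM_CFG1_GLOB_MF_MODE_MASK	0x00000FF0
#define NVM_CFG1_GLOB_MF_MODE_OFFSET	4
#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0	0x3

/* Extract one sub-field from a 32-bit NVM configuration word. */
static inline uint32_t get_nvm_field(uint32_t dword, uint32_t mask,
				     uint32_t offset)
{
	return (dword & mask) >> offset;
}

int main(void)
{
	uint32_t generic_cont0 = 0x30;	/* example raw value: MF_MODE bits hold 0x3 */

	if (get_nvm_field(generic_cont0, NVM_CFG1_GLOB_MF_MODE_MASK,
			  NVM_CFG1_GLOB_MF_MODE_OFFSET) ==
	    NVM_CFG1_GLOB_MF_MODE_NPAR1_0)
		printf("multi-function mode: NPAR1.0\n");
	return 0;
}

The same read-mask-shift pattern applies to the port, path and func sections as well as the public_* mailbox words (e.g. FUNC_MF_CFG_PROTOCOL_* or DRV_MB_PARAM_*), which is why each field carries both a mask and a shift/offset define.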
+/**************************************************************************** +* Directories Region * +****************************************************************************/ +struct nvm_code_entry { + u32 image_type; /* Image type */ + u32 nvm_start_addr; /* NVM address of the image */ + u32 len; /* Include CRC */ + u32 sram_start_addr; + u32 sram_run_addr; /* Relevant in case of MIM only */ +}; + +enum nvm_image_type { + NVM_TYPE_TIM1 = 0x01, + NVM_TYPE_TIM2 = 0x02, + NVM_TYPE_MIM1 = 0x03, + NVM_TYPE_MIM2 = 0x04, + NVM_TYPE_MBA = 0x05, + NVM_TYPE_MODULES_PN = 0x06, + NVM_TYPE_VPD = 0x07, + NVM_TYPE_MFW_TRACE1 = 0x08, + NVM_TYPE_MFW_TRACE2 = 0x09, + NVM_TYPE_NVM_CFG1 = 0x0a, + NVM_TYPE_L2B = 0x0b, + NVM_TYPE_DIR1 = 0x0c, + NVM_TYPE_EAGLE_FW1 = 0x0d, + NVM_TYPE_FALCON_FW1 = 0x0e, + NVM_TYPE_PCIE_FW1 = 0x0f, + NVM_TYPE_HW_SET = 0x10, + NVM_TYPE_LIM = 0x11, + NVM_TYPE_AVS_FW1 = 0x12, + NVM_TYPE_DIR2 = 0x13, + NVM_TYPE_CCM = 0x14, + NVM_TYPE_EAGLE_FW2 = 0x15, + NVM_TYPE_FALCON_FW2 = 0x16, + NVM_TYPE_PCIE_FW2 = 0x17, + NVM_TYPE_AVS_FW2 = 0x18, + + NVM_TYPE_MAX, +}; + +#define MAX_NVM_DIR_ENTRIES 200 + +struct nvm_dir { + s32 seq; +#define NVM_DIR_NEXT_MFW_MASK 0x00000001 +#define NVM_DIR_SEQ_MASK 0xfffffffe +#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK) + +#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK) + + u32 num_images; + u32 rsrv; + struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */ +}; + +#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \ + (_num_images - \ + 1) * sizeof(struct nvm_code_entry) + \ + NVM_CRC_SIZE) + +struct nvm_vpd_image { + u32 format_revision; +#define VPD_IMAGE_VERSION 1 + + /* This array length depends on the number of VPD fields */ + u8 vpd_data[1]; +}; + +/**************************************************************************** +* NVRAM FULL MAP * +****************************************************************************/ +#define DIR_ID_1 (0) +#define DIR_ID_2 (1) +#define MAX_DIR_IDS (2) + +#define MFW_BUNDLE_1 (0) +#define MFW_BUNDLE_2 (1) +#define MAX_MFW_BUNDLES (2) + +#define FLASH_PAGE_SIZE 0x1000 +#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4Kb */ +#define ASIC_MIM_MAX_SIZE (300 * FLASH_PAGE_SIZE) /* 1.2Mb */ +#define FPGA_MIM_MAX_SIZE (25 * FLASH_PAGE_SIZE) /* 60Kb */ + +#define LIM_MAX_SIZE ((2 * \ + FLASH_PAGE_SIZE) - \ + sizeof(struct legacy_bootstrap_region) - \ + NVM_RSV_SIZE) +#define LIM_OFFSET (NVM_OFFSET(lim_image)) +#define NVM_RSV_SIZE (44) +#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \ + FPGA_MIM_MAX_SIZE) +#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \ + ((idx == \ + NVM_TYPE_MIM2) ? 
MIM_MAX_SIZE(is_asic) : 0)) +#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \ + MIM_MAX_SIZE(is_asic) * 2) + +union nvm_dir_union { + struct nvm_dir dir; + u8 page[FLASH_PAGE_SIZE]; +}; + +/* Address + * +-------------------+ 0x000000 + * | Bootstrap: | + * | magic_number | + * | sram_start_addr | + * | code_len | + * | code_start_addr | + * | crc | + * +-------------------+ 0x000014 + * | rsrv | + * +-------------------+ 0x000040 + * | LIM | + * +-------------------+ 0x002000 + * | Dir1 | + * +-------------------+ 0x003000 + * | Dir2 | + * +-------------------+ 0x004000 + * | MIM1 | + * +-------------------+ 0x130000 + * | MIM2 | + * +-------------------+ 0x25C000 + * | Rest Images: | + * | TIM1/2 | + * | MFW_TRACE1/2 | + * | Eagle/Falcon FW | + * | PCIE/AVS FW | + * | MBA/CCM/L2B | + * | VPD | + * | optic_modules | + * | ... | + * +-------------------+ 0x400000 + */ +struct nvm_image { +/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/ + /* NVM Offset (size) */ + struct legacy_bootstrap_region bootstrap; + u8 rsrv[NVM_RSV_SIZE]; + u8 lim_image[LIM_MAX_SIZE]; + union nvm_dir_union dir[MAX_MFW_BUNDLES]; + + /* MIM1_IMAGE 0x004000 (0x12c000) */ + /* MIM2_IMAGE 0x130000 (0x12c000) */ +/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/ +}; /* 0x134 */ + +#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f)))) + +struct hw_set_info { + u32 reg_type; +#define GRC_REG_TYPE 1 +#define PHY_REG_TYPE 2 +#define PCI_REG_TYPE 4 + + u32 bank_num; + u32 pf_num; + u32 operation; +#define READ_OP 1 +#define WRITE_OP 2 +#define RMW_SET_OP 3 +#define RMW_CLR_OP 4 + + u32 reg_addr; + u32 reg_data; + + u32 reset_type; +#define POR_RESET_TYPE BIT(0) +#define HARD_RESET_TYPE BIT(1) +#define CORE_RESET_TYPE BIT(2) +#define MCP_RESET_TYPE BIT(3) +#define PERSET_ASSERT BIT(4) +#define PERSET_DEASSERT BIT(5) +}; + +struct hw_set_image { + u32 format_version; +#define HW_SET_IMAGE_VERSION 1 + u32 no_hw_sets; + + /* This array length depends on the no_hw_sets */ + struct hw_set_info hw_sets[1]; +}; + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c new file mode 100644 index 000000000000..ffa99273b353 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -0,0 +1,776 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_reg_addr.h" + +#define QED_BAR_ACQUIRE_TIMEOUT 1000 + +/* Invalid values */ +#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1)) + +struct qed_ptt { + struct list_head list_entry; + unsigned int idx; + struct pxp_ptt_entry pxp; +}; + +struct qed_ptt_pool { + struct list_head free_list; + spinlock_t lock; /* ptt synchronized access */ + struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM]; +}; + +int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), + GFP_ATOMIC); + int i; + + if (!p_pool) + return -ENOMEM; + + INIT_LIST_HEAD(&p_pool->free_list); + for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { + p_pool->ptts[i].idx = i; + p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET; + p_pool->ptts[i].pxp.pretend.control = 0; + if (i >= RESERVED_PTT_MAX) + list_add(&p_pool->ptts[i].list_entry, + &p_pool->free_list); + } + + p_hwfn->p_ptt_pool = p_pool; + spin_lock_init(&p_pool->lock); + + return 0; +} + +void qed_ptt_invalidate(struct qed_hwfn *p_hwfn) +{ + struct qed_ptt *p_ptt; + int i; + + for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { + p_ptt = &p_hwfn->p_ptt_pool->ptts[i]; + p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET; + } +} + +void qed_ptt_pool_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->p_ptt_pool); + p_hwfn->p_ptt_pool = NULL; +} + +struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) +{ + struct qed_ptt *p_ptt; + unsigned int i; + + /* Take the free PTT from the list */ + for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) { + spin_lock_bh(&p_hwfn->p_ptt_pool->lock); + + if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) { + p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list, + struct qed_ptt, list_entry); + list_del(&p_ptt->list_entry); + + spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); + + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "allocated ptt %d\n", p_ptt->idx); + return p_ptt; + } + + spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); + usleep_range(1000, 2000); + } + + DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n"); + return NULL; +} + +void qed_ptt_release(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + spin_lock_bh(&p_hwfn->p_ptt_pool->lock); + list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list); + spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); +} + +u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + /* The HW is using DWORDS and we need to translate it to Bytes */ + return le32_to_cpu(p_ptt->pxp.offset) << 2; +} + +static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt) +{ + return PXP_PF_WINDOW_ADMIN_PER_PF_START + + p_ptt->idx * sizeof(struct pxp_ptt_entry); +} + +u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt) +{ + return PXP_EXTERNAL_BAR_PF_WINDOW_START + + p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE; +} + +void qed_ptt_set_win(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 new_hw_addr) +{ + u32 prev_hw_addr; + + prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt); + + if (new_hw_addr == prev_hw_addr) + return; + + /* Update PTT entery in admin window */ + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "Updating PTT entry %d to offset 
0x%x\n", + p_ptt->idx, new_hw_addr); + + /* The HW is using DWORDS and the address is in Bytes */ + p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2); + + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, offset), + le32_to_cpu(p_ptt->pxp.offset)); +} + +static u32 qed_set_ptt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr) +{ + u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt); + u32 offset; + + offset = hw_addr - win_hw_addr; + + /* Verify the address is within the window */ + if (hw_addr < win_hw_addr || + offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) { + qed_ptt_set_win(p_hwfn, p_ptt, hw_addr); + offset = 0; + } + + return qed_ptt_get_bar_addr(p_ptt) + offset; +} + +struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, + enum reserved_ptts ptt_idx) +{ + if (ptt_idx >= RESERVED_PTT_MAX) { + DP_NOTICE(p_hwfn, + "Requested PTT %d is out of range\n", ptt_idx); + return NULL; + } + + return &p_hwfn->p_ptt_pool->ptts[ptt_idx]; +} + +void qed_wr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr, u32 val) +{ + u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr); + + REG_WR(p_hwfn, bar_addr, val); + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n", + bar_addr, hw_addr, val); +} + +u32 qed_rd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr) +{ + u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr); + u32 val = REG_RD(p_hwfn, bar_addr); + + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n", + bar_addr, hw_addr, val); + + return val; +} + +static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + void *addr, + u32 hw_addr, + size_t n, + bool to_device) +{ + u32 dw_count, *host_addr, hw_offset; + size_t quota, done = 0; + u32 __iomem *reg_addr; + + while (done < n) { + quota = min_t(size_t, n - done, + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE); + + qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); + hw_offset = qed_ptt_get_bar_addr(p_ptt); + + dw_count = quota / 4; + host_addr = (u32 *)((u8 *)addr + done); + reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset); + if (to_device) + while (dw_count--) + DIRECT_REG_WR(reg_addr++, *host_addr++); + else + while (dw_count--) + *host_addr++ = DIRECT_REG_RD(reg_addr++); + + done += quota; + } +} + +void qed_memcpy_from(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + void *dest, u32 hw_addr, size_t n) +{ + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n", + hw_addr, dest, hw_addr, (unsigned long)n); + + qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false); +} + +void qed_memcpy_to(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr, void *src, size_t n) +{ + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n", + hw_addr, hw_addr, src, (unsigned long)n); + + qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true); +} + +void qed_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 fid) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); + + /* Every pretend undos previous pretends, including + * previous port pretend. 
+ */ + SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + + if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) + fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); + + p_ptt->pxp.pretend.control = cpu_to_le16(control); + p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid); + + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +void qed_port_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 port_id) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + + p_ptt->pxp.pretend.control = cpu_to_le16(control); + + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +void qed_port_unpretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + + p_ptt->pxp.pretend.control = cpu_to_le16(control); + + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +/* DMAE */ +static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, + const u8 is_src_type_grc, + const u8 is_dst_type_grc, + struct qed_dmae_params *p_params) +{ + u32 opcode = 0; + u16 opcodeB = 0; + + /* Whether the source is the PCIe or the GRC. + * 0- The source is the PCIe + * 1- The source is the GRC. + */ + opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC + : DMAE_CMD_SRC_MASK_PCIE) << + DMAE_CMD_SRC_SHIFT; + opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) << + DMAE_CMD_SRC_PF_ID_SHIFT); + + /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */ + opcode |= (is_dst_type_grc ? 
DMAE_CMD_DST_MASK_GRC + : DMAE_CMD_DST_MASK_PCIE) << + DMAE_CMD_DST_SHIFT; + opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) << + DMAE_CMD_DST_PF_ID_SHIFT); + + /* Whether to write a completion word to the completion destination: + * 0-Do not write a completion word + * 1-Write the completion word + */ + opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT); + opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK << + DMAE_CMD_SRC_ADDR_RESET_SHIFT); + + if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST) + opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT); + + opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT); + + opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT); + + /* reset source address in next go */ + opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK << + DMAE_CMD_SRC_ADDR_RESET_SHIFT); + + /* reset dest address in next go */ + opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK << + DMAE_CMD_DST_ADDR_RESET_SHIFT); + + opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK << + DMAE_CMD_SRC_VF_ID_SHIFT); + + opcodeB |= (DMAE_CMD_DST_VF_ID_MASK << + DMAE_CMD_DST_VF_ID_SHIFT); + + p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode); + p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB); +} + +u32 qed_dmae_idx_to_go_cmd(u8 idx) +{ + /* All the DMAE 'go' registers form an array in internal memory */ + return DMAE_REG_GO_C0 + (idx << 2); +} + +static int +qed_dmae_post_command(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd; + u8 idx_cmd = p_hwfn->dmae_info.channel, i; + int qed_status = 0; + + /* verify address is not NULL */ + if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) || + ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) { + DP_NOTICE(p_hwfn, + "source or destination address 0 idx_cmd=%d\n" + "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n", + idx_cmd, + le32_to_cpu(command->opcode), + le16_to_cpu(command->opcode_b), + le16_to_cpu(command->length), + le32_to_cpu(command->src_addr_hi), + le32_to_cpu(command->src_addr_lo), + le32_to_cpu(command->dst_addr_hi), + le32_to_cpu(command->dst_addr_lo)); + + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n", + idx_cmd, + le32_to_cpu(command->opcode), + le16_to_cpu(command->opcode_b), + le16_to_cpu(command->length), + le32_to_cpu(command->src_addr_hi), + le32_to_cpu(command->src_addr_lo), + le32_to_cpu(command->dst_addr_hi), + le32_to_cpu(command->dst_addr_lo)); + + /* Copy the command to DMAE - need to do it before every call + * for source/dest address no reset. + * The first 9 DWs are the command registers, the 10 DW is the + * GO register, and the rest are result registers + * (which are read only by the client). + */ + for (i = 0; i < DMAE_CMD_SIZE; i++) { + u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ? 
+ *(((u32 *)command) + i) : 0; + + qed_wr(p_hwfn, p_ptt, + DMAE_REG_CMD_MEM + + (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) + + (i * sizeof(u32)), data); + } + + qed_wr(p_hwfn, p_ptt, + qed_dmae_idx_to_go_cmd(idx_cmd), + DMAE_GO_VALUE); + + return qed_status; +} + +int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn) +{ + dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr; + struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd; + u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer; + u32 **p_comp = &p_hwfn->dmae_info.p_completion_word; + + *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(u32), + p_addr, + GFP_KERNEL); + if (!*p_comp) { + DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n"); + goto err; + } + + p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr; + *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct dmae_cmd), + p_addr, GFP_KERNEL); + if (!*p_cmd) { + DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n"); + goto err; + } + + p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr; + *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(u32) * DMAE_MAX_RW_SIZE, + p_addr, GFP_KERNEL); + if (!*p_buff) { + DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n"); + goto err; + } + + p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; + + return 0; +err: + qed_dmae_info_free(p_hwfn); + return -ENOMEM; +} + +void qed_dmae_info_free(struct qed_hwfn *p_hwfn) +{ + dma_addr_t p_phys; + + /* Just make sure no one is in the middle */ + mutex_lock(&p_hwfn->dmae_info.mutex); + + if (p_hwfn->dmae_info.p_completion_word) { + p_phys = p_hwfn->dmae_info.completion_word_phys_addr; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(u32), + p_hwfn->dmae_info.p_completion_word, + p_phys); + p_hwfn->dmae_info.p_completion_word = NULL; + } + + if (p_hwfn->dmae_info.p_dmae_cmd) { + p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct dmae_cmd), + p_hwfn->dmae_info.p_dmae_cmd, + p_phys); + p_hwfn->dmae_info.p_dmae_cmd = NULL; + } + + if (p_hwfn->dmae_info.p_intermediate_buffer) { + p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(u32) * DMAE_MAX_RW_SIZE, + p_hwfn->dmae_info.p_intermediate_buffer, + p_phys); + p_hwfn->dmae_info.p_intermediate_buffer = NULL; + } + + mutex_unlock(&p_hwfn->dmae_info.mutex); +} + +static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn) +{ + u32 wait_cnt = 0; + u32 wait_cnt_limit = 10000; + + int qed_status = 0; + + barrier(); + while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) { + udelay(DMAE_MIN_WAIT_TIME); + if (++wait_cnt > wait_cnt_limit) { + DP_NOTICE(p_hwfn->cdev, + "Timed-out waiting for operation to complete. 
Completion word is 0x%08x expected 0x%08x.\n", + *p_hwfn->dmae_info.p_completion_word, + DMAE_COMPLETION_VAL); + qed_status = -EBUSY; + break; + } + + /* to sync the completion_word since we are not + * using the volatile keyword for p_completion_word + */ + barrier(); + } + + if (qed_status == 0) + *p_hwfn->dmae_info.p_completion_word = 0; + + return qed_status; +} + +static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u64 src_addr, + u64 dst_addr, + u8 src_type, + u8 dst_type, + u32 length) +{ + dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; + struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; + int qed_status = 0; + + switch (src_type) { + case QED_DMAE_ADDRESS_GRC: + case QED_DMAE_ADDRESS_HOST_PHYS: + cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr)); + cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr)); + break; + /* for virtual source addresses we use the intermediate buffer. */ + case QED_DMAE_ADDRESS_HOST_VIRT: + cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys)); + cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys)); + memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0], + (void *)(uintptr_t)src_addr, + length * sizeof(u32)); + break; + default: + return -EINVAL; + } + + switch (dst_type) { + case QED_DMAE_ADDRESS_GRC: + case QED_DMAE_ADDRESS_HOST_PHYS: + cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr)); + cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr)); + break; + /* for virtual source addresses we use the intermediate buffer. */ + case QED_DMAE_ADDRESS_HOST_VIRT: + cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys)); + cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys)); + break; + default: + return -EINVAL; + } + + cmd->length = cpu_to_le16((u16)length); + + qed_dmae_post_command(p_hwfn, p_ptt); + + qed_status = qed_dmae_operation_wait(p_hwfn); + + if (qed_status) { + DP_NOTICE(p_hwfn, + "qed_dmae_host2grc: Wait Failed. 
source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n", + src_addr, + dst_addr, + length); + return qed_status; + } + + if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT) + memcpy((void *)(uintptr_t)(dst_addr), + &p_hwfn->dmae_info.p_intermediate_buffer[0], + length * sizeof(u32)); + + return 0; +} + +static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u64 src_addr, u64 dst_addr, + u8 src_type, u8 dst_type, + u32 size_in_dwords, + struct qed_dmae_params *p_params) +{ + dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr; + u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0; + struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; + u64 src_addr_split = 0, dst_addr_split = 0; + u16 length_limit = DMAE_MAX_RW_SIZE; + int qed_status = 0; + u32 offset = 0; + + qed_dmae_opcode(p_hwfn, + (src_type == QED_DMAE_ADDRESS_GRC), + (dst_type == QED_DMAE_ADDRESS_GRC), + p_params); + + cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys)); + cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys)); + cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL); + + /* Check if the grc_addr is valid like < MAX_GRC_OFFSET */ + cnt_split = size_in_dwords / length_limit; + length_mod = size_in_dwords % length_limit; + + src_addr_split = src_addr; + dst_addr_split = dst_addr; + + for (i = 0; i <= cnt_split; i++) { + offset = length_limit * i; + + if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) { + if (src_type == QED_DMAE_ADDRESS_GRC) + src_addr_split = src_addr + offset; + else + src_addr_split = src_addr + (offset * 4); + } + + if (dst_type == QED_DMAE_ADDRESS_GRC) + dst_addr_split = dst_addr + offset; + else + dst_addr_split = dst_addr + (offset * 4); + + length_cur = (cnt_split == i) ? length_mod : length_limit; + + /* might be zero on last iteration */ + if (!length_cur) + continue; + + qed_status = qed_dmae_execute_sub_operation(p_hwfn, + p_ptt, + src_addr_split, + dst_addr_split, + src_type, + dst_type, + length_cur); + if (qed_status) { + DP_NOTICE(p_hwfn, + "qed_dmae_execute_sub_operation Failed with error 0x%x. 
source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n", + qed_status, + src_addr, + dst_addr, + length_cur); + break; + } + } + + return qed_status; +} + +int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u64 source_addr, + u32 grc_addr, + u32 size_in_dwords, + u32 flags) +{ + u32 grc_addr_in_dw = grc_addr / sizeof(u32); + struct qed_dmae_params params; + int rc; + + memset(¶ms, 0, sizeof(struct qed_dmae_params)); + params.flags = flags; + + mutex_lock(&p_hwfn->dmae_info.mutex); + + rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr, + grc_addr_in_dw, + QED_DMAE_ADDRESS_HOST_VIRT, + QED_DMAE_ADDRESS_GRC, + size_in_dwords, ¶ms); + + mutex_unlock(&p_hwfn->dmae_info.mutex); + + return rc; +} + +u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, + enum protocol_type proto, + union qed_qm_pq_params *p_params) +{ + u16 pq_id = 0; + + if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) && + !p_params) { + DP_NOTICE(p_hwfn, + "Protocol %d received NULL PQ params\n", + proto); + return 0; + } + + switch (proto) { + case PROTOCOLID_CORE: + if (p_params->core.tc == LB_TC) + pq_id = p_hwfn->qm_info.pure_lb_pq; + else + pq_id = p_hwfn->qm_info.offload_pq; + break; + case PROTOCOLID_ETH: + pq_id = p_params->eth.tc; + break; + default: + pq_id = 0; + } + + pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ); + + return pq_id; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h new file mode 100644 index 000000000000..e56d433793be --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -0,0 +1,263 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_HW_H +#define _QED_HW_H + +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_dev_api.h" + +/* Forward decleration */ +struct qed_ptt; + +enum reserved_ptts { + RESERVED_PTT_EDIAG, + RESERVED_PTT_USER_SPACE, + RESERVED_PTT_MAIN, + RESERVED_PTT_DPC, + RESERVED_PTT_MAX +}; + +enum _dmae_cmd_dst_mask { + DMAE_CMD_DST_MASK_NONE = 0, + DMAE_CMD_DST_MASK_PCIE = 1, + DMAE_CMD_DST_MASK_GRC = 2 +}; + +enum _dmae_cmd_src_mask { + DMAE_CMD_SRC_MASK_PCIE = 0, + DMAE_CMD_SRC_MASK_GRC = 1 +}; + +enum _dmae_cmd_crc_mask { + DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0, + DMAE_CMD_COMP_CRC_EN_MASK_SET = 1 +}; + +/* definitions for DMA constants */ +#define DMAE_GO_VALUE 0x1 + +#define DMAE_COMPLETION_VAL 0xD1AE +#define DMAE_CMD_ENDIANITY 0x2 + +#define DMAE_CMD_SIZE 14 +#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5) +#define DMAE_MIN_WAIT_TIME 0x2 +#define DMAE_MAX_CLIENTS 32 + +/** + * @brief qed_gtt_init - Initialize GTT windows + * + * @param p_hwfn + */ +void qed_gtt_init(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured + * + * @param p_hwfn + */ +void qed_ptt_invalidate(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool + * + * @param p_hwfn + * + * @return struct _qed_status - success (0), negative - error. 
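+ *
+ * The pool provides one PTT entry per external BAR PF window; the
+ * first RESERVED_PTT_MAX entries are kept off the free list and are
+ * handed out only through qed_get_reserved_ptt().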
+ */ +int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_ptt_pool_free - + * + * @param p_hwfn + */ +void qed_ptt_pool_free(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address + * + * @param p_hwfn + * @param p_ptt + * + * @return u32 + */ +u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address + * + * @param p_hwfn + * @param p_ptt + * + * @return u32 + */ +u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt); + +/** + * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address + * + * @param p_hwfn + * @param new_hw_addr + * @param p_ptt + */ +void qed_ptt_set_win(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 new_hw_addr); + +/** + * @brief qed_get_reserved_ptt - Get a specific reserved PTT + * + * @param p_hwfn + * @param ptt_idx + * + * @return struct qed_ptt * + */ +struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, + enum reserved_ptts ptt_idx); + +/** + * @brief qed_wr - Write value to BAR using the given ptt + * + * @param p_hwfn + * @param p_ptt + * @param val + * @param hw_addr + */ +void qed_wr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr, + u32 val); + +/** + * @brief qed_rd - Read value from BAR using the given ptt + * + * @param p_hwfn + * @param p_ptt + * @param val + * @param hw_addr + */ +u32 qed_rd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr); + +/** + * @brief qed_memcpy_from - copy n bytes from BAR using the given + * ptt + * + * @param p_hwfn + * @param p_ptt + * @param dest + * @param hw_addr + * @param n + */ +void qed_memcpy_from(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + void *dest, + u32 hw_addr, + size_t n); + +/** + * @brief qed_memcpy_to - copy n bytes to BAR using the given + * ptt + * + * @param p_hwfn + * @param p_ptt + * @param hw_addr + * @param src + * @param n + */ +void qed_memcpy_to(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr, + void *src, + size_t n); +/** + * @brief qed_fid_pretend - pretend to another function when + * accessing the ptt window. There is no way to unpretend + * a function. The only way to cancel a pretend is to + * pretend back to the original function. + * + * @param p_hwfn + * @param p_ptt + * @param fid - fid field of pxp_pretend structure. Can contain + * either pf / vf, port/path fields are don't care. + */ +void qed_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 fid); + +/** + * @brief qed_port_pretend - pretend to another port when + * accessing the ptt window + * + * @param p_hwfn + * @param p_ptt + * @param port_id - the port to pretend to + */ +void qed_port_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 port_id); + +/** + * @brief qed_port_unpretend - cancel any previously set port + * pretend + * + * @param p_hwfn + * @param p_ptt + */ +void qed_port_unpretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd + * this is declared here since other files will require it. + * @param idx + */ +u32 qed_dmae_idx_to_go_cmd(u8 idx); + +/** + * @brief qed_dmae_info_alloc - Init the dmae_info structure + * which is part of p_hwfn. 
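+ *
+ * Allocates the DMA-coherent completion word, DMAE command buffer
+ * and intermediate buffer used for DMAE transactions.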
+ * @param p_hwfn + */ +int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_dmae_info_free - Free the dmae_info structure + * which is part of p_hwfn + * + * @param p_hwfn + */ +void qed_dmae_info_free(struct qed_hwfn *p_hwfn); + +union qed_qm_pq_params { + struct { + u8 tc; + } core; + + struct { + u8 is_vf; + u8 vf_id; + u8 tc; + } eth; +}; + +u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, + enum protocol_type proto, + union qed_qm_pq_params *params); + +int qed_init_fw_data(struct qed_dev *cdev, + const u8 *fw_data); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c new file mode 100644 index 000000000000..0b21a553cc7d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -0,0 +1,798 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_reg_addr.h" + +enum cminterface { + MCM_SEC, + MCM_PRI, + UCM_SEC, + UCM_PRI, + TCM_SEC, + TCM_PRI, + YCM_SEC, + YCM_PRI, + XCM_SEC, + XCM_PRI, + NUM_OF_CM_INTERFACES +}; + +/* general constants */ +#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */ +#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ + QM_PQ_ELEMENT_SIZE, \ + 0x1000) : 0) +#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \ + 0x100) - 1 : 0) +#define QM_INVALID_PQ_ID 0xffff +/* feature enable */ +#define QM_BYPASS_EN 1 +#define QM_BYTE_CRD_EN 1 +/* other PQ constants */ +#define QM_OTHER_PQS_PER_PF 4 +/* WFQ constants */ +#define QM_WFQ_UPPER_BOUND 6250000 +#define QM_WFQ_VP_PQ_VOQ_SHIFT 0 +#define QM_WFQ_VP_PQ_PF_SHIFT 5 +#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) +#define QM_WFQ_MAX_INC_VAL 4375000 +#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val)) +/* RL constants */ +#define QM_RL_UPPER_BOUND 6250000 +#define QM_RL_PERIOD 5 /* in us */ +#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) +#define QM_RL_INC_VAL(rate) max_t(u32, \ + (((rate ? 
rate : 1000000) \ + * QM_RL_PERIOD) / 8), 1) +#define QM_RL_MAX_INC_VAL 4375000 +/* AFullOprtnstcCrdMask constants */ +#define QM_OPPOR_LINE_VOQ_DEF 1 +#define QM_OPPOR_FW_STOP_DEF 0 +#define QM_OPPOR_PQ_EMPTY_DEF 1 +#define EAGLE_WORKAROUND_TC 7 +/* Command Queue constants */ +#define PBF_CMDQ_PURE_LB_LINES 150 +#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8 +#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \ + PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \ + (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ + PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET)) +#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \ + PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \ + (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \ + PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET)) +#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \ + 4) * \ + 2) | QM_LINE_CRD_REG_SIGN_BIT) +/* BTB: blocks constants (block size = 256B) */ +#define BTB_JUMBO_PKT_BLOCKS 38 +#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS +#define BTB_EAGLE_WORKAROUND_BLOCKS 4 +#define BTB_PURE_LB_FACTOR 10 +#define BTB_PURE_LB_RATIO 7 +/* QM stop command constants */ +#define QM_STOP_PQ_MASK_WIDTH 32 +#define QM_STOP_CMD_ADDR 0x2 +#define QM_STOP_CMD_STRUCT_SIZE 2 +#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 +#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 +#define QM_STOP_CMD_PAUSE_MASK_MASK -1 +#define QM_STOP_CMD_GROUP_ID_OFFSET 1 +#define QM_STOP_CMD_GROUP_ID_SHIFT 16 +#define QM_STOP_CMD_GROUP_ID_MASK 15 +#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 +#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 +#define QM_STOP_CMD_PQ_TYPE_MASK 1 +#define QM_STOP_CMD_MAX_POLL_COUNT 100 +#define QM_STOP_CMD_POLL_PERIOD_US 500 +/* QM command macros */ +#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \ + _STRUCT_SIZE +#define QM_CMD_SET_FIELD(var, cmd, field, \ + value) SET_FIELD(var[cmd ## _ ## field ## \ + _OFFSET], \ + cmd ## _ ## field, \ + value) +/* QM: VOQ macros */ +#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \ + (max_phy_tcs_pr_port) \ + + (tc)) +#define LB_VOQ(port) ( \ + MAX_PHYS_VOQS + (port)) +#define VOQ(port, tc, max_phy_tcs_pr_port) \ + ((tc) < \ + LB_TC ? PHYS_VOQ(port, \ + tc, \ + max_phy_tcs_pr_port) \ + : LB_VOQ(port)) +/******************** INTERNAL IMPLEMENTATION *********************/ +/* Prepare PF RL enable/disable runtime init values */ +static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, + bool pf_rl_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); + if (pf_rl_en) { + /* enable RLs for all VOQs */ + STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, + (1 << MAX_NUM_VOQS) - 1); + /* write RL period */ + STORE_RT_REG(p_hwfn, + QM_REG_RLPFPERIOD_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + STORE_RT_REG(p_hwfn, + QM_REG_RLPFPERIODTIMER_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + /* set credit threshold for QM bypass flow */ + if (QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, + QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, + QM_RL_UPPER_BOUND); + } +} + +/* Prepare PF WFQ enable/disable runtime init values */ +static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, + bool pf_wfq_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); + /* set credit threshold for QM bypass flow */ + if (pf_wfq_en && QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, + QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, + QM_WFQ_UPPER_BOUND); +} + +/* Prepare VPORT RL enable/disable runtime init values */ +static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, + bool vport_rl_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, + vport_rl_en ? 
1 : 0); + if (vport_rl_en) { + /* write RL period (use timer 0 only) */ + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLPERIOD_0_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + /* set credit threshold for QM bypass flow */ + if (QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, + QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, + QM_RL_UPPER_BOUND); + } +} + +/* Prepare VPORT WFQ enable/disable runtime init values */ +static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, + bool vport_wfq_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, + vport_wfq_en ? 1 : 0); + /* set credit threshold for QM bypass flow */ + if (vport_wfq_en && QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, + QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, + QM_WFQ_UPPER_BOUND); +} + +/* Prepare runtime init values to allocate PBF command queue lines for + * the specified VOQ + */ +static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, + u8 voq, + u16 cmdq_lines) +{ + u32 qm_line_crd; + + /* In A0 - Limit the size of pbf queue so that only 511 commands with + * the minimum size of 4 (FCoE minimum size) + */ + bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev); + + if (is_bb_a0) + cmdq_lines = min_t(u32, cmdq_lines, 1022); + qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); + OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), + (u32)cmdq_lines); + STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd); + STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq, + qm_line_crd); +} + +/* Prepare runtime init values to allocate PBF command queue lines. */ +static void qed_cmdq_lines_rt_init( + struct qed_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params port_params[MAX_NUM_PORTS]) +{ + u8 tc, voq, port_id; + + /* clear PBF lines for all VOQs */ + for (voq = 0; voq < MAX_NUM_VOQS; voq++) + STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0); + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + if (port_params[port_id].active) { + u16 phys_lines, phys_lines_per_tc; + u8 phys_tcs = port_params[port_id].num_active_phys_tcs; + + /* find #lines to divide between the active + * physical TCs. + */ + phys_lines = port_params[port_id].num_pbf_cmd_lines - + PBF_CMDQ_PURE_LB_LINES; + /* find #lines per active physical TC */ + phys_lines_per_tc = phys_lines / phys_tcs; + /* init registers per active TC */ + for (tc = 0; tc < phys_tcs; tc++) { + voq = PHYS_VOQ(port_id, tc, + max_phys_tcs_per_port); + qed_cmdq_lines_voq_rt_init(p_hwfn, voq, + phys_lines_per_tc); + } + /* init registers for pure LB TC */ + qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id), + PBF_CMDQ_PURE_LB_LINES); + } + } +} + +static void qed_btb_blocks_rt_init( + struct qed_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params port_params[MAX_NUM_PORTS]) +{ + u32 usable_blocks, pure_lb_blocks, phys_blocks; + u8 tc, voq, port_id; + + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + u32 temp; + u8 phys_tcs; + + if (!port_params[port_id].active) + continue; + + phys_tcs = port_params[port_id].num_active_phys_tcs; + + /* subtract headroom blocks */ + usable_blocks = port_params[port_id].num_btb_blocks - + BTB_HEADROOM_BLOCKS; + + /* find blocks per physical TC. use factor to avoid + * floating arithmethic. 
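+ * pure_lb_blocks works out to roughly
+ * usable_blocks / (phys_tcs * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO),
+ * clamped to at least one jumbo packet worth of blocks; the
+ * remaining blocks are split evenly between the physical TCs.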
+ */ + pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) / + (phys_tcs * BTB_PURE_LB_FACTOR + + BTB_PURE_LB_RATIO); + pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS, + pure_lb_blocks / BTB_PURE_LB_FACTOR); + phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs; + + /* init physical TCs */ + for (tc = 0; tc < phys_tcs; tc++) { + voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port); + STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq), + phys_blocks); + } + + /* init pure LB TC */ + temp = LB_VOQ(port_id); + STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp), + pure_lb_blocks); + } +} + +/* Prepare Tx PQ mapping runtime init values for the specified PF */ +static void qed_tx_pq_map_rt_init( + struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params, + u32 base_mem_addr_4kb) +{ + struct init_qm_vport_params *vport_params = p_params->vport_params; + u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; + u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE; + u16 last_pq_group = (p_params->start_pq + num_pqs - 1) / + QM_PF_QUEUE_GROUP_SIZE; + bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev); + u16 i, pq_id, pq_group; + + /* a bit per Tx PQ indicating if the PQ is associated with a VF */ + u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; + u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE; + u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width; + u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); + u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); + u32 mem_addr_4kb = base_mem_addr_4kb; + + /* set mapping from PQ group to PF */ + for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++) + STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, + (u32)(p_params->pf_id)); + /* set PQ sizes */ + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, + QM_PQ_SIZE_256B(p_params->num_pf_cids)); + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, + QM_PQ_SIZE_256B(p_params->num_vf_cids)); + + /* go over all Tx PQs */ + for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { + u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, + p_params->max_phys_tcs_per_port); + bool is_vf_pq = (i >= p_params->num_pf_pqs); + struct qm_rf_pq_map tx_pq_map; + + /* update first Tx PQ of VPORT/TC */ + u8 vport_id_in_pf = p_params->pq_params[i].vport_id - + p_params->start_vport; + u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0]; + u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id]; + + if (first_tx_pq_id == QM_INVALID_PQ_ID) { + /* create new VP PQ */ + pq_ids[p_params->pq_params[i].tc_id] = pq_id; + first_tx_pq_id = pq_id; + /* map VP PQ to VOQ and PF */ + STORE_RT_REG(p_hwfn, + QM_REG_WFQVPMAP_RT_OFFSET + + first_tx_pq_id, + (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | + (p_params->pf_id << + QM_WFQ_VP_PQ_PF_SHIFT)); + } + /* fill PQ map entry */ + memset(&tx_pq_map, 0, sizeof(tx_pq_map)); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID, + is_vf_pq ? 1 : 0); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, + is_vf_pq ? 
p_params->pq_params[i].vport_id : 0); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, + p_params->pq_params[i].wrr_group); + /* write PQ map entry to CAM */ + STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, + *((u32 *)&tx_pq_map)); + /* set base address */ + STORE_RT_REG(p_hwfn, + QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, + mem_addr_4kb); + /* check if VF PQ */ + if (is_vf_pq) { + /* if PQ is associated with a VF, add indication + * to PQ VF mask + */ + tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |= + (1 << (pq_id % tx_pq_vf_mask_width)); + mem_addr_4kb += vport_pq_mem_4kb; + } else { + mem_addr_4kb += pq_mem_4kb; + } + } + + /* store Tx PQ VF mask to size select register */ + for (i = 0; i < num_tx_pq_vf_masks; i++) { + if (tx_pq_vf_mask[i]) { + if (is_bb_a0) { + u32 curr_mask = 0, addr; + + addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4); + if (!p_params->is_first_pf) + curr_mask = qed_rd(p_hwfn, p_ptt, + addr); + + addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; + + STORE_RT_REG(p_hwfn, addr, + curr_mask | tx_pq_vf_mask[i]); + } else { + u32 addr; + + addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; + STORE_RT_REG(p_hwfn, addr, + tx_pq_vf_mask[i]); + } + } + } +} + +/* Prepare Other PQ mapping runtime init values for the specified PF */ +static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, + u8 port_id, + u8 pf_id, + u32 num_pf_cids, + u32 num_tids, + u32 base_mem_addr_4kb) +{ + u16 i, pq_id; + + /* a single other PQ group is used in each PF, + * where PQ group i is used in PF i. + */ + u16 pq_group = pf_id; + u32 pq_size = num_pf_cids + num_tids; + u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size); + u32 mem_addr_4kb = base_mem_addr_4kb; + + /* map PQ group to PF */ + STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, + (u32)(pf_id)); + /* set PQ sizes */ + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, + QM_PQ_SIZE_256B(pq_size)); + /* set base address */ + for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; + i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { + STORE_RT_REG(p_hwfn, + QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, + mem_addr_4kb); + mem_addr_4kb += pq_mem_4kb; + } +} + +/* Prepare PF WFQ runtime init values for the specified PF. + * Return -1 on error. + */ +static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, + struct qed_qm_pf_rt_init_params *p_params) +{ + u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; + u32 crd_reg_offset; + u32 inc_val; + u16 i; + + if (p_params->pf_id < MAX_NUM_PFS_BB) + crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET; + else + crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET + + (p_params->pf_id % MAX_NUM_PFS_BB); + + inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); + if (inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); + return -1; + } + STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, + inc_val); + STORE_RT_REG(p_hwfn, + QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, + QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT); + + for (i = 0; i < num_tx_pqs; i++) { + u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, + p_params->max_phys_tcs_per_port); + + OVERWRITE_RT_REG(p_hwfn, + crd_reg_offset + voq * MAX_NUM_PFS_BB, + QM_WFQ_INIT_CRD(inc_val) | + QM_WFQ_CRD_REG_SIGN_BIT); + } + + return 0; +} + +/* Prepare PF RL runtime init values for the specified PF. + * Return -1 on error. 
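+ * Programs the PF rate-limiter credit, upper-bound and increment
+ * runtime registers; the increment value is derived from the
+ * requested rate via QM_RL_INC_VAL().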
+ */ +static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, + u8 pf_id, + u32 pf_rl) +{ + u32 inc_val = QM_RL_INC_VAL(pf_rl); + + if (inc_val > QM_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration"); + return -1; + } + STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, + QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, + QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val); + return 0; +} + +/* Prepare VPORT WFQ runtime init values for the specified VPORTs. + * Return -1 on error. + */ +static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, + u8 start_vport, + u8 num_vports, + struct init_qm_vport_params *vport_params) +{ + u8 tc, i, vport_id; + u32 inc_val; + + /* go over all PF VPORTs */ + for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { + u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET; + u16 *pq_ids = &vport_params[i].first_tx_pq_id[0]; + + if (!vport_params[i].vport_wfq) + continue; + + inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq); + if (inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, + "Invalid VPORT WFQ weight configuration"); + return -1; + } + + /* each VPORT can have several VPORT PQ IDs for + * different TCs + */ + for (tc = 0; tc < NUM_OF_TCS; tc++) { + u16 vport_pq_id = pq_ids[tc]; + + if (vport_pq_id != QM_INVALID_PQ_ID) { + STORE_RT_REG(p_hwfn, + QM_REG_WFQVPWEIGHT_RT_OFFSET + + vport_pq_id, inc_val); + STORE_RT_REG(p_hwfn, temp + vport_pq_id, + QM_WFQ_UPPER_BOUND | + QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_WFQVPCRD_RT_OFFSET + + vport_pq_id, + QM_WFQ_INIT_CRD(inc_val) | + QM_WFQ_CRD_REG_SIGN_BIT); + } + } + } + + return 0; +} + +static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, + u8 start_vport, + u8 num_vports, + struct init_qm_vport_params *vport_params) +{ + u8 i, vport_id; + + /* go over all PF VPORTs */ + for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { + u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl); + + if (inc_val > QM_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, + "Invalid VPORT rate-limit configuration"); + return -1; + } + + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, + QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, + QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, + inc_val); + } + + return 0; +} + +static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 reg_val, i; + + for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0; + i++) { + udelay(QM_STOP_CMD_POLL_PERIOD_US); + reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); + } + + /* check if timeout while waiting for SDM command ready */ + if (i == QM_STOP_CMD_MAX_POLL_COUNT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "Timeout when waiting for QM SDM command ready signal\n"); + return false; + } + + return true; +} + +static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd_addr, + u32 cmd_data_lsb, + u32 cmd_data_msb) +{ + if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt)) + return false; + + qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr); + qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb); + qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb); + qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1); + qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0); + + return qed_poll_on_qm_cmd_ready(p_hwfn, 
p_ptt); +} + +/******************** INTERFACE IMPLEMENTATION *********************/ +u32 qed_qm_pf_mem_size(u8 pf_id, + u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, + u16 num_pf_pqs, + u16 num_vf_pqs) +{ + return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs + + QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs + + QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF; +} + +int qed_qm_common_rt_init( + struct qed_hwfn *p_hwfn, + struct qed_qm_common_rt_init_params *p_params) +{ + /* init AFullOprtnstcCrdMask */ + u32 mask = (QM_OPPOR_LINE_VOQ_DEF << + QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) | + (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) | + (p_params->pf_wfq_en << + QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) | + (p_params->vport_wfq_en << + QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) | + (p_params->pf_rl_en << + QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) | + (p_params->vport_rl_en << + QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) | + (QM_OPPOR_FW_STOP_DEF << + QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) | + (QM_OPPOR_PQ_EMPTY_DEF << + QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT); + + STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); + qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en); + qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en); + qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en); + qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en); + qed_cmdq_lines_rt_init(p_hwfn, + p_params->max_ports_per_engine, + p_params->max_phys_tcs_per_port, + p_params->port_params); + qed_btb_blocks_rt_init(p_hwfn, + p_params->max_ports_per_engine, + p_params->max_phys_tcs_per_port, + p_params->port_params); + return 0; +} + +int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params) +{ + struct init_qm_vport_params *vport_params = p_params->vport_params; + u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids + + p_params->num_tids) * + QM_OTHER_PQS_PER_PF; + u8 tc, i; + + /* clear first Tx PQ ID array for each VPORT */ + for (i = 0; i < p_params->num_vports; i++) + for (tc = 0; tc < NUM_OF_TCS; tc++) + vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; + + /* map Other PQs (if any) */ + qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id, + p_params->num_pf_cids, p_params->num_tids, 0); + + /* map Tx PQs */ + qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb); + + if (p_params->pf_wfq) + if (qed_pf_wfq_rt_init(p_hwfn, p_params)) + return -1; + + if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) + return -1; + + if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport, + p_params->num_vports, vport_params)) + return -1; + + if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport, + p_params->num_vports, vport_params)) + return -1; + + return 0; +} + +int qed_init_pf_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 pf_id, + u32 pf_rl) +{ + u32 inc_val = QM_RL_INC_VAL(pf_rl); + + if (inc_val > QM_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration"); + return -1; + } + + qed_wr(p_hwfn, p_ptt, + QM_REG_RLPFCRD + pf_id * 4, + QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val); + + return 0; +} + +int qed_init_vport_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 vport_id, + u32 vport_rl) +{ + u32 inc_val = QM_RL_INC_VAL(vport_rl); + + if (inc_val > QM_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration"); + return -1; + } + + qed_wr(p_hwfn, p_ptt, + QM_REG_RLGLBLCRD + vport_id * 4, + 
QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val); + + return 0; +} + +bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool is_release_cmd, + bool is_tx_pq, + u16 start_pq, + u16 num_pqs) +{ + u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; + u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id; + + /* set command's PQ type */ + QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1); + + for (pq_id = start_pq; pq_id <= last_pq; pq_id++) { + /* set PQ bit in mask (stop command only) */ + if (!is_release_cmd) + pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH)); + + /* if last PQ or end of PQ mask, write command */ + if ((pq_id == last_pq) || + (pq_id % QM_STOP_PQ_MASK_WIDTH == + (QM_STOP_PQ_MASK_WIDTH - 1))) { + QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, + PAUSE_MASK, pq_mask); + QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, + GROUP_ID, + pq_id / QM_STOP_PQ_MASK_WIDTH); + if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, + cmd_arr[0], cmd_arr[1])) + return false; + pq_mask = 0; + } + } + + return true; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c new file mode 100644 index 000000000000..796f1390e598 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -0,0 +1,531 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_reg_addr.h" + +#define QED_INIT_MAX_POLL_COUNT 100 +#define QED_INIT_POLL_PERIOD_US 500 + +static u32 pxp_global_win[] = { + 0, + 0, + 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */ + 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */ + 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */ + 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */ + 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */ + 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */ + 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */ + 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */ + 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */ + 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, +}; + +void qed_init_iro_array(struct qed_dev *cdev) +{ + cdev->iro_arr = iro_arr; +} + +/* Runtime configuration helpers */ +void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn) +{ + int i; + + for (i = 0; i < RUNTIME_ARRAY_SIZE; i++) + p_hwfn->rt_data[i].b_valid = false; +} + +void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, + u32 rt_offset, + u32 val) +{ + p_hwfn->rt_data[rt_offset].init_val = val; + p_hwfn->rt_data[rt_offset].b_valid = true; +} + +void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, + u32 rt_offset, + u32 *val, + size_t size) +{ + size_t i; + + for (i = 0; i < size / sizeof(u32); i++) { + p_hwfn->rt_data[rt_offset + i].init_val = val[i]; + p_hwfn->rt_data[rt_offset + i].b_valid = true; + } +} + +static void qed_init_rt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, + u32 rt_offset, + u32 size) +{ + struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset; + u32 i; + + for (i = 0; i < 
size; i++) { + if (!rt_data[i].b_valid) + continue; + qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val); + } +} + +int qed_init_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_rt_data *rt_data; + + rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC); + if (!rt_data) + return -ENOMEM; + + p_hwfn->rt_data = rt_data; + + return 0; +} + +void qed_init_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->rt_data); + p_hwfn->rt_data = NULL; +} + +static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, + u32 dmae_data_offset, + u32 size, + const u32 *buf, + bool b_must_dmae, + bool b_can_dmae) +{ + int rc = 0; + + /* Perform DMAE only for lengthy enough sections or for wide-bus */ + if (!b_can_dmae || (!b_must_dmae && (size < 16))) { + const u32 *data = buf + dmae_data_offset; + u32 i; + + for (i = 0; i < size; i++) + qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]); + } else { + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (uintptr_t)(buf + dmae_data_offset), + addr, size, 0); + } + + return rc; +} + +static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, + u32 fill, + u32 fill_count) +{ + static u32 zero_buffer[DMAE_MAX_RW_SIZE]; + + memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE); + + /* invoke the DMAE virtual/physical buffer API with + * 1. DMAE init channel + * 2. addr, + * 3. p_hwfb->temp_data, + * 4. fill_count + */ + + return qed_dmae_host2grc(p_hwfn, p_ptt, + (uintptr_t)(&zero_buffer[0]), + addr, fill_count, + QED_DMAE_FLAG_RW_REPL_SRC); +} + +static void qed_init_fill(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, + u32 fill, + u32 fill_count) +{ + u32 i; + + for (i = 0; i < fill_count; i++, addr += sizeof(u32)) + qed_wr(p_hwfn, p_ptt, addr, fill); +} + +static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct init_write_op *cmd, + bool b_must_dmae, + bool b_can_dmae) +{ + u32 data = le32_to_cpu(cmd->data); + u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset); + u32 offset, output_len, input_len, max_size; + struct qed_dev *cdev = p_hwfn->cdev; + union init_array_hdr *hdr; + const u32 *array_data; + int rc = 0; + u32 size; + + array_data = cdev->fw_data->arr_data; + + hdr = (union init_array_hdr *)(array_data + + dmae_array_offset); + data = le32_to_cpu(hdr->raw.data); + switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) { + case INIT_ARR_ZIPPED: + offset = dmae_array_offset + 1; + input_len = GET_FIELD(data, + INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE); + max_size = MAX_ZIPPED_SIZE * 4; + memset(p_hwfn->unzip_buf, 0, max_size); + + output_len = qed_unzip_data(p_hwfn, input_len, + (u8 *)&array_data[offset], + max_size, (u8 *)p_hwfn->unzip_buf); + if (output_len) { + rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0, + output_len, + p_hwfn->unzip_buf, + b_must_dmae, b_can_dmae); + } else { + DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n"); + rc = -EINVAL; + } + break; + case INIT_ARR_PATTERN: + { + u32 repeats = GET_FIELD(data, + INIT_ARRAY_PATTERN_HDR_REPETITIONS); + u32 i; + + size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE); + + for (i = 0; i < repeats; i++, addr += size << 2) { + rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, + dmae_array_offset + 1, + size, array_data, + b_must_dmae, b_can_dmae); + if (rc) + break; + } + break; + } + case INIT_ARR_STANDARD: + size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE); + rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, + 
dmae_array_offset + 1, + size, array_data, + b_must_dmae, b_can_dmae); + break; + } + + return rc; +} + +/* init_ops write command */ +static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct init_write_op *cmd, + bool b_can_dmae) +{ + u32 data = le32_to_cpu(cmd->data); + u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS); + union init_write_args *arg = &cmd->args; + int rc = 0; + + /* Sanitize */ + if (b_must_dmae && !b_can_dmae) { + DP_NOTICE(p_hwfn, + "Need to write to %08x for Wide-bus but DMAE isn't allowed\n", + addr); + return -EINVAL; + } + + switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) { + case INIT_SRC_INLINE: + qed_wr(p_hwfn, p_ptt, addr, + le32_to_cpu(arg->inline_val)); + break; + case INIT_SRC_ZEROS: + if (b_must_dmae || + (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64))) + rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, + le32_to_cpu(arg->zeros_count)); + else + qed_init_fill(p_hwfn, p_ptt, addr, 0, + le32_to_cpu(arg->zeros_count)); + break; + case INIT_SRC_ARRAY: + rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd, + b_must_dmae, b_can_dmae); + break; + case INIT_SRC_RUNTIME: + qed_init_rt(p_hwfn, p_ptt, addr, + le16_to_cpu(arg->runtime.offset), + le16_to_cpu(arg->runtime.size)); + break; + } + + return rc; +} + +static inline bool comp_eq(u32 val, u32 expected_val) +{ + return val == expected_val; +} + +static inline bool comp_and(u32 val, u32 expected_val) +{ + return (val & expected_val) == expected_val; +} + +static inline bool comp_or(u32 val, u32 expected_val) +{ + return (val | expected_val) > 0; +} + +/* init_ops read/poll commands */ +static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct init_read_op *cmd) +{ + u32 data = le32_to_cpu(cmd->op_data); + u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; + + bool (*comp_check)(u32 val, + u32 expected_val); + u32 delay = QED_INIT_POLL_PERIOD_US, val; + + val = qed_rd(p_hwfn, p_ptt, addr); + + data = le32_to_cpu(cmd->op_data); + if (GET_FIELD(data, INIT_READ_OP_POLL)) { + int i; + + switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) { + case INIT_COMPARISON_EQ: + comp_check = comp_eq; + break; + case INIT_COMPARISON_OR: + comp_check = comp_or; + break; + case INIT_COMPARISON_AND: + comp_check = comp_and; + break; + default: + comp_check = NULL; + DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", + data); + return; + } + + for (i = 0; + i < QED_INIT_MAX_POLL_COUNT && + !comp_check(val, le32_to_cpu(cmd->expected_val)); + i++) { + udelay(delay); + val = qed_rd(p_hwfn, p_ptt, addr); + } + + if (i == QED_INIT_MAX_POLL_COUNT) + DP_ERR(p_hwfn, + "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n", + addr, le32_to_cpu(cmd->expected_val), + val, data); + } +} + +/* init_ops callbacks entry point */ +static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct init_callback_op *p_cmd) +{ + DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n"); +} + +static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn, + u16 *offset, + int modes) +{ + struct qed_dev *cdev = p_hwfn->cdev; + const u8 *modes_tree_buf; + u8 arg1, arg2, tree_val; + + modes_tree_buf = cdev->fw_data->modes_tree_buf; + tree_val = modes_tree_buf[(*offset)++]; + switch (tree_val) { + case INIT_MODE_OP_NOT: + return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1; + case INIT_MODE_OP_OR: + arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes); + arg2 = 
qed_init_cmd_mode_match(p_hwfn, offset, modes); + return arg1 | arg2; + case INIT_MODE_OP_AND: + arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes); + arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes); + return arg1 & arg2; + default: + tree_val -= MAX_INIT_MODE_OPS; + return (modes & (1 << tree_val)) ? 1 : 0; + } +} + +static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, + struct init_if_mode_op *p_cmd, + int modes) +{ + u16 offset = le16_to_cpu(p_cmd->modes_buf_offset); + + if (qed_init_cmd_mode_match(p_hwfn, &offset, modes)) + return 0; + else + return GET_FIELD(le32_to_cpu(p_cmd->op_data), + INIT_IF_MODE_OP_CMD_OFFSET); +} + +static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn, + struct init_if_phase_op *p_cmd, + u32 phase, + u32 phase_id) +{ + u32 data = le32_to_cpu(p_cmd->phase_data); + u32 op_data = le32_to_cpu(p_cmd->op_data); + + if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase && + (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID || + GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id))) + return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET); + else + return 0; +} + +int qed_init_run(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + int phase, + int phase_id, + int modes) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u32 cmd_num, num_init_ops; + union init_op *init_ops; + bool b_dmae = false; + int rc = 0; + + num_init_ops = cdev->fw_data->init_ops_size; + init_ops = cdev->fw_data->init_ops; + + p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC); + if (!p_hwfn->unzip_buf) { + DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n"); + return -ENOMEM; + } + + for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) { + union init_op *cmd = &init_ops[cmd_num]; + u32 data = le32_to_cpu(cmd->raw.op_data); + + switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) { + case INIT_OP_WRITE: + rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write, + b_dmae); + break; + case INIT_OP_READ: + qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read); + break; + case INIT_OP_IF_MODE: + cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode, + modes); + break; + case INIT_OP_IF_PHASE: + cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase, + phase, phase_id); + b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE); + break; + case INIT_OP_DELAY: + /* qed_init_run is always invoked from + * sleep-able context + */ + udelay(le32_to_cpu(cmd->delay.delay)); + break; + + case INIT_OP_CALLBACK: + qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); + break; + } + + if (rc) + break; + } + + kfree(p_hwfn->unzip_buf); + return rc; +} + +void qed_gtt_init(struct qed_hwfn *p_hwfn) +{ + u32 gtt_base; + u32 i; + + /* Set the global windows */ + gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START; + + for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++) + if (pxp_global_win[i]) + REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE, + pxp_global_win[i]); +} + +int qed_init_fw_data(struct qed_dev *cdev, + const u8 *data) +{ + struct qed_fw_data *fw = cdev->fw_data; + struct bin_buffer_hdr *buf_hdr; + u32 offset, len; + + if (!data) { + DP_NOTICE(cdev, "Invalid fw data\n"); + return -EINVAL; + } + + buf_hdr = (struct bin_buffer_hdr *)data; + + offset = buf_hdr[BIN_BUF_INIT_CMD].offset; + fw->init_ops = (union init_op *)(data + offset); + + offset = buf_hdr[BIN_BUF_INIT_VAL].offset; + fw->arr_data = (u32 *)(data + offset); + + offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset; + fw->modes_tree_buf = (u8 *)(data + offset); + len = buf_hdr[BIN_BUF_INIT_CMD].length; + fw->init_ops_size = len / 
sizeof(struct init_raw_op); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h new file mode 100644 index 000000000000..1e832049983d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -0,0 +1,110 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_INIT_OPS_H +#define _QED_INIT_OPS_H + +#include <linux/types.h> +#include <linux/slab.h> +#include "qed.h" + +/** + * @brief qed_init_iro_array - init iro_arr. + * + * + * @param cdev + */ +void qed_init_iro_array(struct qed_dev *cdev); + +/** + * @brief qed_init_run - Run the init-sequence. + * + * + * @param p_hwfn + * @param p_ptt + * @param phase + * @param phase_id + * @param modes + * @return _qed_status_t + */ +int qed_init_run(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + int phase, + int phase_id, + int modes); + +/** + * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs. + * + * + * @param p_hwfn + * + * @return _qed_status_t + */ +int qed_init_alloc(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_init_hwfn_deallocate + * + * + * @param p_hwfn + */ +void qed_init_free(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_init_clear_rt_data - Clears the runtime init array. + * + * + * @param p_hwfn + */ +void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_init_store_rt_reg - Store a configuration value in the RT array. + * + * + * @param p_hwfn + * @param rt_offset + * @param val + */ +void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, + u32 rt_offset, + u32 val); + +#define STORE_RT_REG(hwfn, offset, val) \ + qed_init_store_rt_reg(hwfn, offset, val) + +#define OVERWRITE_RT_REG(hwfn, offset, val) \ + qed_init_store_rt_reg(hwfn, offset, val) + +/** + * @brief + * + * + * @param p_hwfn + * @param rt_offset + * @param val + * @param size + */ +void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, + u32 rt_offset, + u32 *val, + size_t size); + +#define STORE_RT_REG_AGG(hwfn, offset, val) \ + qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val)) + +/** + * @brief + * Initialize GTT global windows and set admin window + * related params of GTT/PTT to default values. + * + * @param p_hwfn + */ +void qed_gtt_init(struct qed_hwfn *p_hwfn); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c new file mode 100644 index 000000000000..2e399b6137a2 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -0,0 +1,1134 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
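The init_ops entry points declared in qed_init_ops.h above (RT-array allocation, firmware-blob parsing, runtime-register staging and the phase runner) are meant to be called in a fixed order. The sketch below is illustrative only: example_run_init() is not part of the patch, the zeroed phase/phase_id/modes arguments and the staged runtime value are placeholders, and it assumes cdev->fw_data was allocated earlier and that fw_blob stays valid while the parsed init_ops pointers are in use.

static int example_run_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    const u8 *fw_blob)
{
	int rc;

	/* Allocate the runtime (RT) shadow array for this hwfn */
	rc = qed_init_alloc(p_hwfn);
	if (rc)
		return rc;

	/* Point fw_data at the init commands, value arrays and mode tree */
	rc = qed_init_fw_data(p_hwfn->cdev, fw_blob);
	if (rc)
		goto out;

	/* Values staged here are written out by INIT_SRC_RUNTIME commands */
	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, 0);

	rc = qed_init_run(p_hwfn, p_ptt, 0, 0, 0);	/* phase, phase_id, modes */
out:
	if (rc)
		qed_init_free(p_hwfn);
	return rc;
}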
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_int.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" + +struct qed_pi_info { + qed_int_comp_cb_t comp_cb; + void *cookie; +}; + +struct qed_sb_sp_info { + struct qed_sb_info sb_info; + + /* per protocol index data */ + struct qed_pi_info pi_info_arr[PIS_PER_SB]; +}; + +#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) + +#define ATTN_STATE_BITS (0xfff) +#define ATTN_BITS_MASKABLE (0x3ff) +struct qed_sb_attn_info { + /* Virtual & Physical address of the SB */ + struct atten_status_block *sb_attn; + dma_addr_t sb_phys; + + /* Last seen running index */ + u16 index; + + /* Previously asserted attentions, which are still unasserted */ + u16 known_attn; + + /* Cleanup address for the link's general hw attention */ + u32 mfw_attn_addr; +}; + +static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, + struct qed_sb_attn_info *p_sb_desc) +{ + u16 rc = 0; + u16 index; + + /* Make certain HW write took affect */ + mmiowb(); + + index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); + if (p_sb_desc->index != index) { + p_sb_desc->index = index; + rc = QED_SB_ATT_IDX; + } + + /* Make certain we got a consistent view with HW */ + mmiowb(); + + return rc; +} + +/** + * @brief qed_int_assertion - handles asserted attention bits + * + * @param p_hwfn + * @param asserted_bits newly asserted bits + * @return int + */ +static int qed_int_assertion(struct qed_hwfn *p_hwfn, + u16 asserted_bits) +{ + struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; + u32 igu_mask; + + /* Mask the source of the attention in the IGU */ + igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + IGU_REG_ATTENTION_ENABLE); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", + igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); + igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "inner known ATTN state: 0x%04x --> 0x%04x\n", + sb_attn_sw->known_attn, + sb_attn_sw->known_attn | asserted_bits); + sb_attn_sw->known_attn |= asserted_bits; + + /* Handle MCP events */ + if (asserted_bits & 0x100) { + qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); + /* Clean the MCP attention */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + sb_attn_sw->mfw_attn_addr, 0); + } + + DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_SET_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), + (u32)asserted_bits); + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", + asserted_bits); + + return 0; +} + +/** + * @brief - handles deassertion of previously asserted attentions. 
+ * + * @param p_hwfn + * @param deasserted_bits - newly deasserted bits + * @return int + * + */ +static int qed_int_deassertion(struct qed_hwfn *p_hwfn, + u16 deasserted_bits) +{ + struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; + u32 aeu_mask; + + if (deasserted_bits != 0x100) + DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n"); + + /* Clear IGU indication for the deasserted bits */ + DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_CLR_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), + ~((u32)deasserted_bits)); + + /* Unmask deasserted attentions in IGU */ + aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + IGU_REG_ATTENTION_ENABLE); + aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); + + /* Clear deassertion from inner state */ + sb_attn_sw->known_attn &= ~deasserted_bits; + + return 0; +} + +static int qed_int_attentions(struct qed_hwfn *p_hwfn) +{ + struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; + struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; + u32 attn_bits = 0, attn_acks = 0; + u16 asserted_bits, deasserted_bits; + __le16 index; + int rc = 0; + + /* Read current attention bits/acks - safeguard against attentions + * by guaranting work on a synchronized timeframe + */ + do { + index = p_sb_attn->sb_index; + attn_bits = le32_to_cpu(p_sb_attn->atten_bits); + attn_acks = le32_to_cpu(p_sb_attn->atten_ack); + } while (index != p_sb_attn->sb_index); + p_sb_attn->sb_index = index; + + /* Attention / Deassertion are meaningful (and in correct state) + * only when they differ and consistent with known state - deassertion + * when previous attention & current ack, and assertion when current + * attention with no previous attention + */ + asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & + ~p_sb_attn_sw->known_attn; + deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & + p_sb_attn_sw->known_attn; + + if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) { + DP_INFO(p_hwfn, + "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", + index, attn_bits, attn_acks, asserted_bits, + deasserted_bits, p_sb_attn_sw->known_attn); + } else if (asserted_bits == 0x100) { + DP_INFO(p_hwfn, + "MFW indication via attention\n"); + } else { + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication [deassertion]\n"); + } + + if (asserted_bits) { + rc = qed_int_assertion(p_hwfn, asserted_bits); + if (rc) + return rc; + } + + if (deasserted_bits) { + rc = qed_int_deassertion(p_hwfn, deasserted_bits); + if (rc) + return rc; + } + + return rc; +} + +static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, + void __iomem *igu_addr, + u32 ack_cons) +{ + struct igu_prod_cons_update igu_ack = { 0 }; + + igu_ack.sb_id_and_flags = + ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_ATTN << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + + DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags); + + /* Both segments (interrupts & acks) are written to same place address; + * Need to guarantee all commands will be received (in-order) by HW. 
+ */ + mmiowb(); + barrier(); +} + +void qed_int_sp_dpc(unsigned long hwfn_cookie) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie; + struct qed_pi_info *pi_info = NULL; + struct qed_sb_attn_info *sb_attn; + struct qed_sb_info *sb_info; + int arr_size; + u16 rc = 0; + + if (!p_hwfn) { + DP_ERR(p_hwfn->cdev, "DPC called - no hwfn!\n"); + return; + } + + if (!p_hwfn->p_sp_sb) { + DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); + return; + } + + sb_info = &p_hwfn->p_sp_sb->sb_info; + arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); + if (!sb_info) { + DP_ERR(p_hwfn->cdev, + "Status block is NULL - cannot ack interrupts\n"); + return; + } + + if (!p_hwfn->p_sb_attn) { + DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); + return; + } + sb_attn = p_hwfn->p_sb_attn; + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", + p_hwfn, p_hwfn->my_id); + + /* Disable ack for def status block. Required both for msix + + * inta in non-mask mode, in inta does no harm. + */ + qed_sb_ack(sb_info, IGU_INT_DISABLE, 0); + + /* Gather Interrupts/Attentions information */ + if (!sb_info->sb_virt) { + DP_ERR( + p_hwfn->cdev, + "Interrupt Status block is NULL - cannot check for new interrupts!\n"); + } else { + u32 tmp_index = sb_info->sb_ack; + + rc = qed_sb_update_sb_idx(sb_info); + DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, + "Interrupt indices: 0x%08x --> 0x%08x\n", + tmp_index, sb_info->sb_ack); + } + + if (!sb_attn || !sb_attn->sb_attn) { + DP_ERR( + p_hwfn->cdev, + "Attentions Status block is NULL - cannot check for new attentions!\n"); + } else { + u16 tmp_index = sb_attn->index; + + rc |= qed_attn_update_idx(p_hwfn, sb_attn); + DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, + "Attention indices: 0x%08x --> 0x%08x\n", + tmp_index, sb_attn->index); + } + + /* Check if we expect interrupts at this time. if not just ack them */ + if (!(rc & QED_SB_EVENT_MASK)) { + qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); + return; + } + + /* Check the validity of the DPC ptt. If not ack interrupts and fail */ + if (!p_hwfn->p_dpc_ptt) { + DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n"); + qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); + return; + } + + if (rc & QED_SB_ATT_IDX) + qed_int_attentions(p_hwfn); + + if (rc & QED_SB_IDX) { + int pi; + + /* Look for a free index */ + for (pi = 0; pi < arr_size; pi++) { + pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; + if (pi_info->comp_cb) + pi_info->comp_cb(p_hwfn, pi_info->cookie); + } + } + + if (sb_attn && (rc & QED_SB_ATT_IDX)) + /* This should be done before the interrupts are enabled, + * since otherwise a new attention will be generated. 
+ */ + qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); + + qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); +} + +static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) +{ + struct qed_dev *cdev = p_hwfn->cdev; + struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; + + if (p_sb) { + if (p_sb->sb_attn) + dma_free_coherent(&cdev->pdev->dev, + SB_ATTN_ALIGNED_SIZE(p_hwfn), + p_sb->sb_attn, + p_sb->sb_phys); + kfree(p_sb); + } +} + +static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + + memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); + + sb_info->index = 0; + sb_info->known_attn = 0; + + /* Configure Attention Status Block in IGU */ + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, + lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, + upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); +} + +static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + void *sb_virt_addr, + dma_addr_t sb_phy_addr) +{ + struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + + sb_info->sb_attn = sb_virt_addr; + sb_info->sb_phys = sb_phy_addr; + + /* Set the address of cleanup for the mcp attention */ + sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + + MISC_REG_AEU_GENERAL_ATTN_0; + + qed_int_sb_attn_setup(p_hwfn, p_ptt); +} + +static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + struct qed_sb_attn_info *p_sb; + void *p_virt; + dma_addr_t p_phys = 0; + + /* SB struct */ + p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC); + if (!p_sb) { + DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n"); + return -ENOMEM; + } + + /* SB ring */ + p_virt = dma_alloc_coherent(&cdev->pdev->dev, + SB_ATTN_ALIGNED_SIZE(p_hwfn), + &p_phys, GFP_KERNEL); + + if (!p_virt) { + DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n"); + kfree(p_sb); + return -ENOMEM; + } + + /* Attention setup */ + p_hwfn->p_sb_attn = p_sb; + qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); + + return 0; +} + +/* coalescing timeout = timeset << (timer_res + 1) */ +#define QED_CAU_DEF_RX_USECS 24 +#define QED_CAU_DEF_TX_USECS 48 + +void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, + struct cau_sb_entry *p_sb_entry, + u8 pf_id, + u16 vf_number, + u8 vf_valid) +{ + u32 cau_state; + + memset(p_sb_entry, 0, sizeof(*p_sb_entry)); + + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); + + /* setting the time resultion to a fixed value ( = 1) */ + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, + QED_CAU_DEF_RX_TIMER_RES); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, + QED_CAU_DEF_TX_TIMER_RES); + + cau_state = CAU_HC_DISABLE_STATE; + + if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { + cau_state = CAU_HC_ENABLE_STATE; + if (!p_hwfn->cdev->rx_coalesce_usecs) + p_hwfn->cdev->rx_coalesce_usecs = + QED_CAU_DEF_RX_USECS; + if (!p_hwfn->cdev->tx_coalesce_usecs) + p_hwfn->cdev->tx_coalesce_usecs = + QED_CAU_DEF_TX_USECS; + } + + SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); + SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); +} + +void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, + 
struct qed_ptt *p_ptt, + dma_addr_t sb_phys, + u16 igu_sb_id, + u16 vf_number, + u8 vf_valid) +{ + struct cau_sb_entry sb_entry; + u32 val; + + qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, + vf_number, vf_valid); + + if (p_hwfn->hw_init_done) { + val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64); + qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys)); + qed_wr(p_hwfn, p_ptt, val + sizeof(u32), + upper_32_bits(sb_phys)); + + val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64); + qed_wr(p_hwfn, p_ptt, val, sb_entry.data); + qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params); + } else { + /* Initialize Status Block Address */ + STORE_RT_REG_AGG(p_hwfn, + CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + + igu_sb_id * 2, + sb_phys); + + STORE_RT_REG_AGG(p_hwfn, + CAU_REG_SB_VAR_MEMORY_RT_OFFSET + + igu_sb_id * 2, + sb_entry); + } + + /* Configure pi coalescing if set */ + if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { + u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >> + (QED_CAU_DEF_RX_TIMER_RES + 1); + u8 num_tc = 1, i; + + qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, + QED_COAL_RX_STATE_MACHINE, + timeset); + + timeset = p_hwfn->cdev->tx_coalesce_usecs >> + (QED_CAU_DEF_TX_TIMER_RES + 1); + + for (i = 0; i < num_tc; i++) { + qed_int_cau_conf_pi(p_hwfn, p_ptt, + igu_sb_id, TX_PI(i), + QED_COAL_TX_STATE_MACHINE, + timeset); + } + } +} + +void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 igu_sb_id, + u32 pi_index, + enum qed_coalescing_fsm coalescing_fsm, + u8 timeset) +{ + struct cau_pi_entry pi_entry; + u32 sb_offset; + u32 pi_offset; + + sb_offset = igu_sb_id * PIS_PER_SB; + memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); + + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); + if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); + else + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); + + pi_offset = sb_offset + pi_index; + if (p_hwfn->hw_init_done) { + qed_wr(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), + *((u32 *)&(pi_entry))); + } else { + STORE_RT_REG(p_hwfn, + CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, + *((u32 *)&(pi_entry))); + } +} + +void qed_int_sb_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_sb_info *sb_info) +{ + /* zero status block and ack counter */ + sb_info->sb_ack = 0; + memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); + + qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, + sb_info->igu_sb_id, 0, 0); +} + +/** + * @brief qed_get_igu_sb_id - given a sw sb_id return the + * igu_sb_id + * + * @param p_hwfn + * @param sb_id + * + * @return u16 + */ +static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, + u16 sb_id) +{ + u16 igu_sb_id; + + /* Assuming continuous set of IGU SBs dedicated for given PF */ + if (sb_id == QED_SP_SB_ID) + igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; + else + igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n", + (sb_id == QED_SP_SB_ID) ? 
"DSB" : "non-DSB", igu_sb_id); + + return igu_sb_id; +} + +int qed_int_sb_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, + u16 sb_id) +{ + sb_info->sb_virt = sb_virt_addr; + sb_info->sb_phys = sb_phy_addr; + + sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); + + if (sb_id != QED_SP_SB_ID) { + p_hwfn->sbs_info[sb_id] = sb_info; + p_hwfn->num_sbs++; + } + + sb_info->cdev = p_hwfn->cdev; + + /* The igu address will hold the absolute address that needs to be + * written to for a specific status block + */ + sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + (sb_info->igu_sb_id << 3); + + sb_info->flags |= QED_SB_INFO_INIT; + + qed_int_sb_setup(p_hwfn, p_ptt, sb_info); + + return 0; +} + +int qed_int_sb_release(struct qed_hwfn *p_hwfn, + struct qed_sb_info *sb_info, + u16 sb_id) +{ + if (sb_id == QED_SP_SB_ID) { + DP_ERR(p_hwfn, "Do Not free sp sb using this function"); + return -EINVAL; + } + + /* zero status block and ack counter */ + sb_info->sb_ack = 0; + memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); + + p_hwfn->sbs_info[sb_id] = NULL; + p_hwfn->num_sbs--; + + return 0; +} + +static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) +{ + struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; + + if (p_sb) { + if (p_sb->sb_info.sb_virt) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + SB_ALIGNED_SIZE(p_hwfn), + p_sb->sb_info.sb_virt, + p_sb->sb_info.sb_phys); + kfree(p_sb); + } +} + +static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_sb_sp_info *p_sb; + dma_addr_t p_phys = 0; + void *p_virt; + + /* SB struct */ + p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC); + if (!p_sb) { + DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n"); + return -ENOMEM; + } + + /* SB ring */ + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + SB_ALIGNED_SIZE(p_hwfn), + &p_phys, GFP_KERNEL); + if (!p_virt) { + DP_NOTICE(p_hwfn, "Failed to allocate status block\n"); + kfree(p_sb); + return -ENOMEM; + } + + /* Status Block setup */ + p_hwfn->p_sp_sb = p_sb; + qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt, + p_phys, QED_SP_SB_ID); + + memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); + + return 0; +} + +static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + if (!p_hwfn) + return; + + if (p_hwfn->p_sp_sb) + qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); + else + DP_NOTICE(p_hwfn->cdev, + "Failed to setup Slow path status block - NULL pointer\n"); + + if (p_hwfn->p_sb_attn) + qed_int_sb_attn_setup(p_hwfn, p_ptt); + else + DP_NOTICE(p_hwfn->cdev, + "Failed to setup attentions status block - NULL pointer\n"); +} + +int qed_int_register_cb(struct qed_hwfn *p_hwfn, + qed_int_comp_cb_t comp_cb, + void *cookie, + u8 *sb_idx, + __le16 **p_fw_cons) +{ + struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; + int qed_status = -ENOMEM; + u8 pi; + + /* Look for a free index */ + for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { + if (!p_sp_sb->pi_info_arr[pi].comp_cb) { + p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; + p_sp_sb->pi_info_arr[pi].cookie = cookie; + *sb_idx = pi; + *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; + qed_status = 0; + break; + } + } + + return qed_status; +} + +int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) +{ + struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; + int qed_status = -ENOMEM; + + if (p_sp_sb->pi_info_arr[pi].comp_cb) { + 
p_sp_sb->pi_info_arr[pi].comp_cb = NULL; + p_sp_sb->pi_info_arr[pi].cookie = NULL; + qed_status = 0; + } + + return qed_status; +} + +u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) +{ + return p_hwfn->p_sp_sb->sb_info.igu_sb_id; +} + +void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_int_mode int_mode) +{ + u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; + + p_hwfn->cdev->int_mode = int_mode; + switch (p_hwfn->cdev->int_mode) { + case QED_INT_MODE_INTA: + igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; + igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + break; + + case QED_INT_MODE_MSI: + igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; + igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + break; + + case QED_INT_MODE_MSIX: + igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; + break; + case QED_INT_MODE_POLL: + break; + } + + qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); +} + +void qed_int_igu_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_int_mode int_mode) +{ + int i; + + p_hwfn->b_int_enabled = 1; + + /* Mask non-link attentions */ + for (i = 0; i < 9; i++) + qed_wr(p_hwfn, p_ptt, + MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); + + /* Enable interrupt Generation */ + qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); + + /* Configure AEU signal change to produce attentions for link */ + qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); + qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); + + /* Flush the writes to IGU */ + mmiowb(); + + /* Unmask AEU signals toward IGU */ + qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); +} + +void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + p_hwfn->b_int_enabled = 0; + + qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); +} + +#define IGU_CLEANUP_SLEEP_LENGTH (1000) +void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 sb_id, + bool cleanup_set, + u16 opaque_fid + ) +{ + u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; + u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; + u32 data = 0; + u32 cmd_ctrl = 0; + u32 val = 0; + u32 sb_bit = 0; + u32 sb_bit_addr = 0; + + /* Set the data field */ + SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); + SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0); + SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); + + /* Set the control register */ + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); + + qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); + + barrier(); + + qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); + + /* Flush the write to IGU */ + mmiowb(); + + /* calculate where to read the status bit from */ + sb_bit = 1 << (sb_id % 32); + sb_bit_addr = sb_id / 32 * sizeof(u32); + + sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; + + /* Now wait for the command to complete */ + do { + val = qed_rd(p_hwfn, p_ptt, sb_bit_addr); + + if ((val & sb_bit) == (cleanup_set ? 
sb_bit : 0)) + break; + + usleep_range(5000, 10000); + } while (--sleep_cnt); + + if (!sleep_cnt) + DP_NOTICE(p_hwfn, + "Timeout waiting for clear status 0x%08x [for sb %d]\n", + val, sb_id); +} + +void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 sb_id, + u16 opaque, + bool b_set) +{ + int pi; + + /* Set */ + if (b_set) + qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque); + + /* Clear */ + qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); + + /* Clear the CAU for the SB */ + for (pi = 0; pi < 12; pi++) + qed_wr(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0); +} + +void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_set, + bool b_slowpath) +{ + u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; + u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; + u32 sb_id = 0; + u32 val = 0; + + val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); + val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; + val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; + qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU cleaning SBs [%d,...,%d]\n", + igu_base_sb, igu_base_sb + igu_sb_cnt - 1); + + for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++) + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, + p_hwfn->hw_info.opaque_fid, + b_set); + + if (b_slowpath) { + sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU cleaning slowpath SB [%d]\n", sb_id); + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, + p_hwfn->hw_info.opaque_fid, + b_set); + } +} + +int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_igu_info *p_igu_info; + struct qed_igu_block *blk; + u32 val; + u16 sb_id; + u16 prev_sb_id = 0xFF; + + p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC); + + if (!p_hwfn->hw_info.p_igu_info) + return -ENOMEM; + + p_igu_info = p_hwfn->hw_info.p_igu_info; + + /* Initialize base sb / sb cnt for PFs */ + p_igu_info->igu_base_sb = 0xffff; + p_igu_info->igu_sb_cnt = 0; + p_igu_info->igu_dsb_id = 0xffff; + p_igu_info->igu_base_sb_iov = 0xffff; + + for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); + sb_id++) { + blk = &p_igu_info->igu_map.igu_blocks[sb_id]; + + val = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); + + /* stop scanning when hit first invalid PF entry */ + if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && + GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) + break; + + blk->status = QED_IGU_STATUS_VALID; + blk->function_id = GET_FIELD(val, + IGU_MAPPING_LINE_FUNCTION_NUMBER); + blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); + blk->vector_number = GET_FIELD(val, + IGU_MAPPING_LINE_VECTOR_NUMBER); + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n", + val, blk->function_id, blk->is_pf, + blk->vector_number); + + if (blk->is_pf) { + if (blk->function_id == p_hwfn->rel_pf_id) { + blk->status |= QED_IGU_STATUS_PF; + + if (blk->vector_number == 0) { + if (p_igu_info->igu_dsb_id == 0xffff) + p_igu_info->igu_dsb_id = sb_id; + } else { + if (p_igu_info->igu_base_sb == + 0xffff) { + p_igu_info->igu_base_sb = sb_id; + } else if (prev_sb_id != sb_id - 1) { + DP_NOTICE(p_hwfn->cdev, + "consecutive igu vectors for HWFN %x broken", + p_hwfn->rel_pf_id); + break; + } + prev_sb_id = sb_id; + /* we don't count the default */ + 
(p_igu_info->igu_sb_cnt)++; + } + } + } + } + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", + p_igu_info->igu_base_sb, + p_igu_info->igu_sb_cnt, + p_igu_info->igu_dsb_id); + + if (p_igu_info->igu_base_sb == 0xffff || + p_igu_info->igu_dsb_id == 0xffff || + p_igu_info->igu_sb_cnt == 0) { + DP_NOTICE(p_hwfn, + "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", + p_igu_info->igu_base_sb, + p_igu_info->igu_sb_cnt, + p_igu_info->igu_dsb_id); + return -EINVAL; + } + + return 0; +} + +/** + * @brief Initialize igu runtime registers + * + * @param p_hwfn + */ +void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) +{ + u32 igu_pf_conf = 0; + + igu_pf_conf |= IGU_PF_CONF_FUNC_EN; + + STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); +} + +u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) +{ + u64 intr_status = 0; + u32 intr_status_lo = 0; + u32 intr_status_hi = 0; + u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - + IGU_CMD_INT_ACK_BASE; + u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - + IGU_CMD_INT_ACK_BASE; + + intr_status_lo = REG_RD(p_hwfn, + GTT_BAR0_MAP_REG_IGU_CMD + + lsb_igu_cmd_addr * 8); + intr_status_hi = REG_RD(p_hwfn, + GTT_BAR0_MAP_REG_IGU_CMD + + msb_igu_cmd_addr * 8); + intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; + + return intr_status; +} + +static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) +{ + tasklet_init(p_hwfn->sp_dpc, + qed_int_sp_dpc, (unsigned long)p_hwfn); + p_hwfn->b_sp_dpc_enabled = true; +} + +static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) +{ + p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC); + if (!p_hwfn->sp_dpc) + return -ENOMEM; + + return 0; +} + +static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->sp_dpc); +} + +int qed_int_alloc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + int rc = 0; + + rc = qed_int_sp_dpc_alloc(p_hwfn); + if (rc) { + DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n"); + return rc; + } + rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt); + if (rc) { + DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n"); + return rc; + } + rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); + if (rc) { + DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n"); + return rc; + } + return rc; +} + +void qed_int_free(struct qed_hwfn *p_hwfn) +{ + qed_int_sp_sb_free(p_hwfn); + qed_int_sb_attn_free(p_hwfn); + qed_int_sp_dpc_free(p_hwfn); +} + +void qed_int_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + qed_int_sp_sb_setup(p_hwfn, p_ptt); + qed_int_sp_dpc_setup(p_hwfn); +} + +int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, + int *p_iov_blks) +{ + struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; + + if (!info) + return 0; + + if (p_iov_blks) + *p_iov_blks = info->free_blks; + + return info->igu_sb_cnt; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h new file mode 100644 index 000000000000..16b57518e706 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -0,0 +1,391 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef _QED_INT_H +#define _QED_INT_H + +#include <linux/types.h> +#include <linux/slab.h> +#include "qed.h" + +/* Fields of IGU PF CONFIGRATION REGISTER */ +#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */ +#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */ +#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */ +#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */ +#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ +#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */ + +/* Igu control commands + */ +enum igu_ctrl_cmd { + IGU_CTRL_CMD_TYPE_RD, + IGU_CTRL_CMD_TYPE_WR, + MAX_IGU_CTRL_CMD +}; + +/* Control register for the IGU command register + */ +struct igu_ctrl_reg { + u32 ctrl_data; +#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */ +#define IGU_CTRL_REG_FID_SHIFT 0 +#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */ +#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16 +#define IGU_CTRL_REG_RESERVED_MASK 0x1 +#define IGU_CTRL_REG_RESERVED_SHIFT 28 +#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */ +#define IGU_CTRL_REG_TYPE_SHIFT 31 +}; + +enum qed_coalescing_fsm { + QED_COAL_RX_STATE_MACHINE, + QED_COAL_TX_STATE_MACHINE +}; + +/** + * @brief qed_int_cau_conf_pi - configure cau for a given + * status block + * + * @param p_hwfn + * @param p_ptt + * @param igu_sb_id + * @param pi_index + * @param state + * @param timeset + */ +void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 igu_sb_id, + u32 pi_index, + enum qed_coalescing_fsm coalescing_fsm, + u8 timeset); + +/** + * @brief qed_int_igu_enable_int - enable device interrupts + * + * @param p_hwfn + * @param p_ptt + * @param int_mode - interrupt mode to use + */ +void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_int_mode int_mode); + +/** + * @brief qed_int_igu_disable_int - disable device interrupts + * + * @param p_hwfn + * @param p_ptt + */ +void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc + * register from igu. + * + * @param p_hwfn + * + * @return u64 + */ +u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn); + +#define QED_SP_SB_ID 0xffff +/** + * @brief qed_int_sb_init - Initializes the sb_info structure. + * + * once the structure is initialized it can be passed to sb related functions. + * + * @param p_hwfn + * @param p_ptt + * @param sb_info points to an uninitialized (but + * allocated) sb_info structure + * @param sb_virt_addr + * @param sb_phy_addr + * @param sb_id the sb_id to be used (zero based in driver) + * should use QED_SP_SB_ID for SP Status block + * + * @return int + */ +int qed_int_sb_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, + u16 sb_id); +/** + * @brief qed_int_sb_setup - Setup the sb. + * + * @param p_hwfn + * @param p_ptt + * @param sb_info initialized sb_info structure + */ +void qed_int_sb_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_sb_info *sb_info); + +/** + * @brief qed_int_sb_release - releases the sb_info structure. 
+ * + * once the structure is released, it's memory can be freed + * + * @param p_hwfn + * @param sb_info points to an allocated sb_info structure + * @param sb_id the sb_id to be used (zero based in driver) + * should never be equal to QED_SP_SB_ID + * (SP Status block) + * + * @return int + */ +int qed_int_sb_release(struct qed_hwfn *p_hwfn, + struct qed_sb_info *sb_info, + u16 sb_id); + +/** + * @brief qed_int_sp_dpc - To be called when an interrupt is received on the + * default status block. + * + * @param p_hwfn - pointer to hwfn + * + */ +void qed_int_sp_dpc(unsigned long hwfn_cookie); + +/** + * @brief qed_int_get_num_sbs - get the number of status + * blocks configured for this funciton in the igu. + * + * @param p_hwfn + * @param p_iov_blks - configured free blks for vfs + * + * @return int - number of status blocks configured + */ +int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, + int *p_iov_blks); + +/** + * @file + * + * @brief Interrupt handler + */ + +#define QED_CAU_DEF_RX_TIMER_RES 0 +#define QED_CAU_DEF_TX_TIMER_RES 0 + +#define QED_SB_ATT_IDX 0x0001 +#define QED_SB_EVENT_MASK 0x0003 + +#define SB_ALIGNED_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) + +struct qed_igu_block { + u8 status; +#define QED_IGU_STATUS_FREE 0x01 +#define QED_IGU_STATUS_VALID 0x02 +#define QED_IGU_STATUS_PF 0x04 + + u8 vector_number; + u8 function_id; + u8 is_pf; +}; + +struct qed_igu_map { + struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH]; +}; + +struct qed_igu_info { + struct qed_igu_map igu_map; + u16 igu_dsb_id; + u16 igu_base_sb; + u16 igu_base_sb_iov; + u16 igu_sb_cnt; + u16 igu_sb_cnt_iov; + u16 free_blks; +}; + +/* TODO Names of function may change... */ +void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_set, + bool b_slowpath); + +void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_int_igu_read_cam - Reads the IGU CAM. + * This function needs to be called during hardware + * prepare. It reads the info from igu cam to know which + * status block is the default / base status block etc. + * + * @param p_hwfn + * @param p_ptt + * + * @return int + */ +int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn, + void *cookie); +/** + * @brief qed_int_register_cb - Register callback func for + * slowhwfn statusblock. + * + * Every protocol that uses the slowhwfn status block + * should register a callback function that will be called + * once there is an update of the sp status block. + * + * @param p_hwfn + * @param comp_cb - function to be called when there is an + * interrupt on the sp sb + * + * @param cookie - passed to the callback function + * @param sb_idx - OUT parameter which gives the chosen index + * for this protocol. + * @param p_fw_cons - pointer to the actual address of the + * consumer for this protocol. + * + * @return int + */ +int qed_int_register_cb(struct qed_hwfn *p_hwfn, + qed_int_comp_cb_t comp_cb, + void *cookie, + u8 *sb_idx, + __le16 **p_fw_cons); + +/** + * @brief qed_int_unregister_cb - Unregisters callback + * function from sp sb. + * Partner of qed_int_register_cb -> should be called + * when no longer required. + * + * @param p_hwfn + * @param pi + * + * @return int + */ +int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, + u8 pi); + +/** + * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id. 
+ * + * @param p_hwfn + * + * @return u16 + */ +u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); + +/** + * @brief Status block cleanup. Should be called for each status + * block that will be used -> both PF / VF + * + * @param p_hwfn + * @param p_ptt + * @param sb_id - igu status block id + * @param cleanup_set - set(1) / clear(0) + * @param opaque_fid - the function for which to perform + * cleanup, for example a PF on behalf of + * its VFs. + */ +void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 sb_id, + bool cleanup_set, + u16 opaque_fid); + +/** + * @brief Status block cleanup. Should be called for each status + * block that will be used -> both PF / VF + * + * @param p_hwfn + * @param p_ptt + * @param sb_id - igu status block id + * @param opaque - opaque fid of the sb owner. + * @param cleanup_set - set(1) / clear(0) + */ +void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 sb_id, + u16 opaque, + bool b_set); + +/** + * @brief qed_int_cau_conf - configure cau for a given status + * block + * + * @param p_hwfn + * @param ptt + * @param sb_phys + * @param igu_sb_id + * @param vf_number + * @param vf_valid + */ +void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t sb_phys, + u16 igu_sb_id, + u16 vf_number, + u8 vf_valid); + +/** + * @brief qed_int_alloc + * + * @param p_hwfn + * @param p_ptt + * + * @return int + */ +int qed_int_alloc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief qed_int_free + * + * @param p_hwfn + */ +void qed_int_free(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_int_setup + * + * @param p_hwfn + * @param p_ptt + */ +void qed_int_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief - Enable Interrupt & Attention for hw function + * + * @param p_hwfn + * @param p_ptt + * @param int_mode + */ +void qed_int_igu_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_int_mode int_mode); + +/** + * @brief - Initialize CAU status block entry + * + * @param p_hwfn + * @param p_sb_entry + * @param pf_id + * @param vf_number + * @param vf_valid + */ +void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, + struct cau_sb_entry *p_sb_entry, + u8 pf_id, + u16 vf_number, + u8 vf_valid); + +#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c new file mode 100644 index 000000000000..f72036a2ef5b --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -0,0 +1,1704 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
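The slow-path status-block callback interface declared in qed_int.h above (qed_int_register_cb()/qed_int_unregister_cb()) lets protocol code get called back from qed_int_sp_dpc(). A minimal sketch, assuming the slow-path status block was already set up via qed_int_alloc(); the qed_example_* names are placeholders and no real protocol work is done:

static int qed_example_sp_comp(struct qed_hwfn *p_hwfn, void *cookie)
{
	/* Invoked from qed_int_sp_dpc() when the slow-path SB index advances */
	return 0;
}

static int qed_example_register(struct qed_hwfn *p_hwfn)
{
	__le16 *p_fw_cons;
	u8 sb_idx;
	int rc;

	rc = qed_int_register_cb(p_hwfn, qed_example_sp_comp,
				 NULL /* cookie */, &sb_idx, &p_fw_cons);
	if (rc)
		return rc;

	/* ... protocol work; *p_fw_cons is the firmware consumer for this PI ... */

	return qed_int_unregister_cb(p_hwfn, sb_idx);
}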
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <asm/param.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/string.h> +#include <linux/version.h> +#include <linux/workqueue.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include "qed.h" +#include <linux/qed/qed_chain.h> +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include <linux/qed/qed_eth_if.h> +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" + +enum qed_rss_caps { + QED_RSS_IPV4 = 0x1, + QED_RSS_IPV6 = 0x2, + QED_RSS_IPV4_TCP = 0x4, + QED_RSS_IPV6_TCP = 0x8, + QED_RSS_IPV4_UDP = 0x10, + QED_RSS_IPV6_UDP = 0x20, +}; + +/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */ +#define QED_RSS_IND_TABLE_SIZE 128 +#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */ + +struct qed_rss_params { + u8 update_rss_config; + u8 rss_enable; + u8 rss_eng_id; + u8 update_rss_capabilities; + u8 update_rss_ind_table; + u8 update_rss_key; + u8 rss_caps; + u8 rss_table_size_log; + u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE]; + u32 rss_key[QED_RSS_KEY_SIZE]; +}; + +enum qed_filter_opcode { + QED_FILTER_ADD, + QED_FILTER_REMOVE, + QED_FILTER_MOVE, + QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */ + QED_FILTER_FLUSH, /* Removes all filters */ +}; + +enum qed_filter_ucast_type { + QED_FILTER_MAC, + QED_FILTER_VLAN, + QED_FILTER_MAC_VLAN, + QED_FILTER_INNER_MAC, + QED_FILTER_INNER_VLAN, + QED_FILTER_INNER_PAIR, + QED_FILTER_INNER_MAC_VNI_PAIR, + QED_FILTER_MAC_VNI_PAIR, + QED_FILTER_VNI, +}; + +struct qed_filter_ucast { + enum qed_filter_opcode opcode; + enum qed_filter_ucast_type type; + u8 is_rx_filter; + u8 is_tx_filter; + u8 vport_to_add_to; + u8 vport_to_remove_from; + unsigned char mac[ETH_ALEN]; + u8 assert_on_error; + u16 vlan; + u32 vni; +}; + +struct qed_filter_mcast { + /* MOVE is not supported for multicast */ + enum qed_filter_opcode opcode; + u8 vport_to_add_to; + u8 vport_to_remove_from; + u8 num_mc_addrs; +#define QED_MAX_MC_ADDRS 64 + unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN]; +}; + +struct qed_filter_accept_flags { + u8 update_rx_mode_config; + u8 update_tx_mode_config; + u8 rx_accept_filter; + u8 tx_accept_filter; +#define QED_ACCEPT_NONE 0x01 +#define QED_ACCEPT_UCAST_MATCHED 0x02 +#define QED_ACCEPT_UCAST_UNMATCHED 0x04 +#define QED_ACCEPT_MCAST_MATCHED 0x08 +#define QED_ACCEPT_MCAST_UNMATCHED 0x10 +#define QED_ACCEPT_BCAST 0x20 +}; + +struct qed_sp_vport_update_params { + u16 opaque_fid; + u8 vport_id; + u8 update_vport_active_rx_flg; + u8 vport_active_rx_flg; + u8 update_vport_active_tx_flg; + u8 vport_active_tx_flg; + u8 update_approx_mcast_flg; + unsigned long bins[8]; + struct qed_rss_params *rss_params; + struct qed_filter_accept_flags accept_flags; +}; + +#define QED_MAX_SGES_NUM 16 +#define CRC32_POLY 0x1edc6f41 + +static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, + u32 concrete_fid, + u16 opaque_fid, + u8 vport_id, + u16 mtu, + u8 drop_ttl0_flg, + u8 inner_vlan_removal_en_flg) +{ + struct qed_sp_init_request_params params; + struct vport_start_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + int rc = -EINVAL; + u16 rx_mode = 0; + u8 abs_vport_id = 0; + + rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); + if (rc != 0) + return rc; + + memset(¶ms, 0, sizeof(params)); 
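+ /* Slow-path request params: describe the VPORT_START ramrod buffer and ask for a blocking (EBLOCK) completion. */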
+ params.ramrod_data_size = sizeof(*p_ramrod); + params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + qed_spq_get_cid(p_hwfn), + opaque_fid, + ETH_RAMROD_VPORT_START, + PROTOCOLID_ETH, + ¶ms); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vport_start; + p_ramrod->vport_id = abs_vport_id; + + p_ramrod->mtu = cpu_to_le16(mtu); + p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg; + p_ramrod->drop_ttl0_en = drop_ttl0_flg; + + SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); + SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); + + p_ramrod->rx_mode.state = cpu_to_le16(rx_mode); + + /* TPA related fields */ + memset(&p_ramrod->tpa_param, 0, + sizeof(struct eth_vport_tpa_param)); + + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ + p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, + concrete_fid); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_rss_params *p_params) +{ + struct eth_vport_rss_config *rss = &p_ramrod->rss_config; + u16 abs_l2_queue = 0, capabilities = 0; + int rc = 0, i; + + if (!p_params) { + p_ramrod->common.update_rss_flg = 0; + return rc; + } + + BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != + ETH_RSS_IND_TABLE_ENTRIES_NUM); + + rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id); + if (rc) + return rc; + + p_ramrod->common.update_rss_flg = p_params->update_rss_config; + rss->update_rss_capabilities = p_params->update_rss_capabilities; + rss->update_rss_ind_table = p_params->update_rss_ind_table; + rss->update_rss_key = p_params->update_rss_key; + + rss->rss_mode = p_params->rss_enable ? + ETH_VPORT_RSS_MODE_REGULAR : + ETH_VPORT_RSS_MODE_DISABLED; + + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV4)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV6)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV4_TCP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV6_TCP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV4_UDP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV6_UDP)); + rss->tbl_size = p_params->rss_table_size_log; + + rss->capabilities = cpu_to_le16(capabilities); + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", + p_ramrod->common.update_rss_flg, + rss->rss_mode, rss->update_rss_capabilities, + capabilities, rss->update_rss_ind_table, + rss->update_rss_key); + + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + rc = qed_fw_l2_queue(p_hwfn, + (u8)p_params->rss_ind_table[i], + &abs_l2_queue); + if (rc) + return rc; + + rss->indirection_table[i] = cpu_to_le16(abs_l2_queue); + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n", + i, rss->indirection_table[i]); + } + + for (i = 0; i < 10; i++) + rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]); + + return rc; +} + +static void +qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_filter_accept_flags accept_flags) +{ + p_ramrod->common.update_rx_mode_flg = + accept_flags.update_rx_mode_config; + + 
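+ /* Each accept-filter translation below runs only when its update flag is set. */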
p_ramrod->common.update_tx_mode_flg = + accept_flags.update_tx_mode_config; + + /* Set Rx mode accept flags */ + if (p_ramrod->common.update_rx_mode_flg) { + u8 accept_filter = accept_flags.rx_accept_filter; + u16 state = 0; + + SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, + !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) || + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)); + + SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, + !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) || + !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, + !!(accept_filter & QED_ACCEPT_BCAST)); + + p_ramrod->rx_mode.state = cpu_to_le16(state); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "p_ramrod->rx_mode.state = 0x%x\n", state); + } + + /* Set Tx mode accept flags */ + if (p_ramrod->common.update_tx_mode_flg) { + u8 accept_filter = accept_flags.tx_accept_filter; + u16 state = 0; + + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, + !!(accept_filter & QED_ACCEPT_NONE)); + + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, + !!(accept_filter & QED_ACCEPT_NONE)); + + SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, + !!(accept_filter & QED_ACCEPT_BCAST)); + + p_ramrod->tx_mode.state = cpu_to_le16(state); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "p_ramrod->tx_mode.state = 0x%x\n", state); + } +} + +static void +qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_sp_vport_update_params *p_params) +{ + int i; + + memset(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); + + if (p_params->update_approx_mcast_flg) { + p_ramrod->common.update_approx_mcast_flg = 1; + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + u32 *p_bins = (u32 *)p_params->bins; + __le32 val = cpu_to_le32(p_bins[i]); + + p_ramrod->approx_mcast.bins[i] = val; + } + } +} + +static int +qed_sp_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct qed_rss_params *p_rss_params = p_params->rss_params; + struct vport_update_ramrod_data_cmn *p_cmn; + struct qed_sp_init_request_params sp_params; + struct vport_update_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + u8 abs_vport_id = 0; + int rc = -EINVAL; + + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc != 0) + return rc; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(*p_ramrod); + sp_params.comp_mode = comp_mode; + sp_params.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + qed_spq_get_cid(p_hwfn), + p_params->opaque_fid, + ETH_RAMROD_VPORT_UPDATE, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + /* Copy input params to ramrod according to FW struct */ + p_ramrod = &p_ent->ramrod.vport_update; + p_cmn = &p_ramrod->common; + + p_cmn->vport_id = abs_vport_id; + 
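+ /* Activity toggles are copied as-is; RSS, mcast bins and accept flags are filled in by the helpers below. */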
p_cmn->rx_active_flg = p_params->vport_active_rx_flg; + p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; + p_cmn->tx_active_flg = p_params->vport_active_tx_flg; + p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; + + rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); + if (rc) { + /* Return spq entry which is taken in qed_sp_init_request()*/ + qed_spq_return_entry(p_hwfn, p_ent); + return rc; + } + + /* Update mcast bins for VFs, PF doesn't use this functionality */ + qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); + + qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u8 vport_id) +{ + struct qed_sp_init_request_params sp_params; + struct vport_stop_ramrod_data *p_ramrod; + struct qed_spq_entry *p_ent; + u8 abs_vport_id = 0; + int rc; + + rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); + if (rc != 0) + return rc; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(*p_ramrod); + sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + qed_spq_get_cid(p_hwfn), + opaque_fid, + ETH_RAMROD_VPORT_STOP, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vport_stop; + p_ramrod->vport_id = abs_vport_id; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_filter_accept_cmd(struct qed_dev *cdev, + u8 vport, + struct qed_filter_accept_flags accept_flags, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct qed_sp_vport_update_params vport_update_params; + int i, rc; + + /* Prepare and send the vport rx_mode change */ + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.vport_id = vport; + vport_update_params.accept_flags = accept_flags; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + + rc = qed_sp_vport_update(p_hwfn, &vport_update_params, + comp_mode, p_comp_data); + if (rc != 0) { + DP_ERR(cdev, "Update rx_mode failed %d\n", rc); + return rc; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Accept filter configured, flags = [Rx]%x [Tx]%x\n", + accept_flags.rx_accept_filter, + accept_flags.tx_accept_filter); + } + + return 0; +} + +static int qed_sp_release_queue_cid( + struct qed_hwfn *p_hwfn, + struct qed_hw_cid_data *p_cid_data) +{ + if (!p_cid_data->b_cid_allocated) + return 0; + + qed_cxt_release_cid(p_hwfn, p_cid_data->cid); + + p_cid_data->b_cid_allocated = false; + + return 0; +} + +static int +qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *params, + u8 stats_id, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size) +{ + struct rx_queue_start_ramrod_data *p_ramrod = NULL; + struct qed_sp_init_request_params sp_params; + struct qed_spq_entry *p_ent = NULL; + struct qed_hw_cid_data *p_rx_cid; + u16 abs_rx_q_id = 0; + u8 abs_vport_id = 0; + int rc = -EINVAL; + + /* Store information for the stop */ + p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id]; + p_rx_cid->cid = cid; + p_rx_cid->opaque_fid = opaque_fid; + p_rx_cid->vport_id = params->vport_id; + + rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id); + if (rc != 0) + return rc; + + rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id); + if (rc != 0) + return 
rc; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", + opaque_fid, cid, params->queue_id, params->vport_id, + params->sb); + + memset(&sp_params, 0, sizeof(params)); + sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + sp_params.ramrod_data_size = sizeof(*p_ramrod); + + rc = qed_sp_init_request(p_hwfn, &p_ent, + cid, opaque_fid, + ETH_RAMROD_RX_QUEUE_START, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_start; + + p_ramrod->sb_id = cpu_to_le16(params->sb); + p_ramrod->sb_index = params->sb_idx; + p_ramrod->vport_id = abs_vport_id; + p_ramrod->stats_counter_id = stats_id; + p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->complete_cqe_flg = 0; + p_ramrod->complete_event_flg = 1; + + p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); + p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr); + p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr); + + p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); + p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr); + p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + return rc; +} + +static int +qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void __iomem **pp_prod) +{ + struct qed_hw_cid_data *p_rx_cid; + u64 init_prod_val = 0; + u16 abs_l2_queue = 0; + u8 abs_stats_id = 0; + int rc; + + rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue); + if (rc != 0) + return rc; + + rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id); + if (rc != 0) + return rc; + + *pp_prod = (u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_PRODS_OFFSET(abs_l2_queue); + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + (u32 *)(&init_prod_val)); + + /* Allocate a CID for the queue */ + p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id]; + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, + &p_rx_cid->cid); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); + return rc; + } + p_rx_cid->b_cid_allocated = true; + + rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, + opaque_fid, + p_rx_cid->cid, + params, + abs_stats_id, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, + cqe_pbl_size); + + if (rc != 0) + qed_sp_release_queue_cid(p_hwfn, p_rx_cid); + + return rc; +} + +static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + u16 rx_queue_id, + bool eq_completion_only, + bool cqe_completion) +{ + struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; + struct rx_queue_stop_ramrod_data *p_ramrod = NULL; + struct qed_sp_init_request_params sp_params; + struct qed_spq_entry *p_ent = NULL; + u16 abs_rx_q_id = 0; + int rc = -EINVAL; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(*p_ramrod); + sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + p_rx_cid->cid, + p_rx_cid->opaque_fid, + ETH_RAMROD_RX_QUEUE_STOP, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_stop; + + qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); + qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id); + p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + + /* Cleaning the queue requires the completion to arrive there. 
+ * In addition, VFs require the answer to come as eqe to PF. + */ + p_ramrod->complete_cqe_flg = + (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) && + !eq_completion_only) || cqe_completion; + p_ramrod->complete_event_flg = + !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) || + eq_completion_only; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + return qed_sp_release_queue_cid(p_hwfn, p_rx_cid); +} + +static int +qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *p_params, + u8 stats_id, + dma_addr_t pbl_addr, + u16 pbl_size, + union qed_qm_pq_params *p_pq_params) +{ + struct tx_queue_start_ramrod_data *p_ramrod = NULL; + struct qed_sp_init_request_params sp_params; + struct qed_spq_entry *p_ent = NULL; + struct qed_hw_cid_data *p_tx_cid; + u8 abs_vport_id; + int rc = -EINVAL; + u16 pq_id; + + /* Store information for the stop */ + p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; + p_tx_cid->cid = cid; + p_tx_cid->opaque_fid = opaque_fid; + + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc) + return rc; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(*p_ramrod); + sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, cid, + opaque_fid, + ETH_RAMROD_TX_QUEUE_START, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.tx_queue_start; + p_ramrod->vport_id = abs_vport_id; + + p_ramrod->sb_id = cpu_to_le16(p_params->sb); + p_ramrod->sb_index = p_params->sb_idx; + p_ramrod->stats_counter_id = stats_id; + p_ramrod->tc = p_pq_params->eth.tc; + + p_ramrod->pbl_size = cpu_to_le16(pbl_size); + p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr); + p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr); + + pq_id = qed_get_qm_pq(p_hwfn, + PROTOCOLID_ETH, + p_pq_params); + p_ramrod->qm_pq_id = cpu_to_le16(pq_id); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + dma_addr_t pbl_addr, + u16 pbl_size, + void __iomem **pp_doorbell) +{ + struct qed_hw_cid_data *p_tx_cid; + union qed_qm_pq_params pq_params; + u8 abs_stats_id = 0; + int rc; + + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); + if (rc) + return rc; + + p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; + memset(p_tx_cid, 0, sizeof(*p_tx_cid)); + memset(&pq_params, 0, sizeof(pq_params)); + + /* Allocate a CID for the queue */ + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, + &p_tx_cid->cid); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); + return rc; + } + p_tx_cid->b_cid_allocated = true; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", + opaque_fid, p_tx_cid->cid, + p_params->queue_id, p_params->vport_id, p_params->sb); + + rc = qed_sp_eth_txq_start_ramrod(p_hwfn, + opaque_fid, + p_tx_cid->cid, + p_params, + abs_stats_id, + pbl_addr, + pbl_size, + &pq_params); + + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY); + + if (rc) + qed_sp_release_queue_cid(p_hwfn, p_tx_cid); + + return rc; +} + +static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, + u16 tx_queue_id) +{ + struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; + struct qed_sp_init_request_params sp_params; + struct qed_spq_entry *p_ent = NULL; + int rc = -EINVAL; + + 
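+ /* Descriptive note (editor's comment, not part of the original patch): the stop flow below posts a TX_QUEUE_STOP ramrod on the queue's CID in blocking (EBLOCK) completion mode and, once the firmware completes it, releases the CID that was acquired when the queue was started. */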
memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data); + sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + p_tx_cid->cid, + p_tx_cid->opaque_fid, + ETH_RAMROD_TX_QUEUE_STOP, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + return qed_sp_release_queue_cid(p_hwfn, p_tx_cid); +} + +static enum eth_filter_action +qed_filter_action(enum qed_filter_opcode opcode) +{ + enum eth_filter_action action = MAX_ETH_FILTER_ACTION; + + switch (opcode) { + case QED_FILTER_ADD: + action = ETH_FILTER_ACTION_ADD; + break; + case QED_FILTER_REMOVE: + action = ETH_FILTER_ACTION_REMOVE; + break; + case QED_FILTER_REPLACE: + case QED_FILTER_FLUSH: + action = ETH_FILTER_ACTION_REPLACE; + break; + default: + action = MAX_ETH_FILTER_ACTION; + } + + return action; +} + +static void qed_set_fw_mac_addr(__le16 *fw_msb, + __le16 *fw_mid, + __le16 *fw_lsb, + u8 *mac) +{ + ((u8 *)fw_msb)[0] = mac[1]; + ((u8 *)fw_msb)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lsb)[0] = mac[5]; + ((u8 *)fw_lsb)[1] = mac[4]; +} + +static int +qed_filter_ucast_common(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + struct vport_filter_update_ramrod_data **pp_ramrod, + struct qed_spq_entry **pp_ent, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + u8 vport_to_add_to = 0, vport_to_remove_from = 0; + struct vport_filter_update_ramrod_data *p_ramrod; + struct qed_sp_init_request_params sp_params; + struct eth_filter_cmd *p_first_filter; + struct eth_filter_cmd *p_second_filter; + enum eth_filter_action action; + int rc; + + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, + &vport_to_remove_from); + if (rc) + return rc; + + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, + &vport_to_add_to); + if (rc) + return rc; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(**pp_ramrod); + sp_params.comp_mode = comp_mode; + sp_params.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, pp_ent, + qed_spq_get_cid(p_hwfn), + opaque_fid, + ETH_RAMROD_FILTERS_UPDATE, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; + p_ramrod = *pp_ramrod; + p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; + p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 
1 : 0; + + switch (p_filter_cmd->opcode) { + case QED_FILTER_FLUSH: + p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break; + case QED_FILTER_MOVE: + p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; + default: + p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break; + } + + p_first_filter = &p_ramrod->filter_cmds[0]; + p_second_filter = &p_ramrod->filter_cmds[1]; + + switch (p_filter_cmd->type) { + case QED_FILTER_MAC: + p_first_filter->type = ETH_FILTER_TYPE_MAC; break; + case QED_FILTER_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_VLAN; break; + case QED_FILTER_MAC_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_PAIR; break; + case QED_FILTER_INNER_MAC: + p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break; + case QED_FILTER_INNER_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break; + case QED_FILTER_INNER_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break; + case QED_FILTER_INNER_MAC_VNI_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; + break; + case QED_FILTER_MAC_VNI_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break; + case QED_FILTER_VNI: + p_first_filter->type = ETH_FILTER_TYPE_VNI; break; + } + + if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || + (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) { + qed_set_fw_mac_addr(&p_first_filter->mac_msb, + &p_first_filter->mac_mid, + &p_first_filter->mac_lsb, + (u8 *)p_filter_cmd->mac); + } + + if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || + (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) + p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan); + + if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_VNI)) + p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni); + + if (p_filter_cmd->opcode == QED_FILTER_MOVE) { + p_second_filter->type = p_first_filter->type; + p_second_filter->mac_msb = p_first_filter->mac_msb; + p_second_filter->mac_mid = p_first_filter->mac_mid; + p_second_filter->mac_lsb = p_first_filter->mac_lsb; + p_second_filter->vlan_id = p_first_filter->vlan_id; + p_second_filter->vni = p_first_filter->vni; + + p_first_filter->action = ETH_FILTER_ACTION_REMOVE; + + p_first_filter->vport_id = vport_to_remove_from; + + p_second_filter->action = ETH_FILTER_ACTION_ADD; + p_second_filter->vport_id = vport_to_add_to; + } else { + action = qed_filter_action(p_filter_cmd->opcode); + + if (action == MAX_ETH_FILTER_ACTION) { + DP_NOTICE(p_hwfn, + "%d is not supported yet\n", + p_filter_cmd->opcode); + return -EINVAL; + } + + p_first_filter->action = action; + p_first_filter->vport_id = (p_filter_cmd->opcode == + QED_FILTER_REMOVE) ? 
+ vport_to_remove_from : + vport_to_add_to; + } + + return 0; +} + +static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct vport_filter_update_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct eth_filter_cmd_header *p_header; + int rc; + + rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, + &p_ramrod, &p_ent, + comp_mode, p_comp_data); + if (rc != 0) { + DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); + return rc; + } + p_header = &p_ramrod->filter_cmd_hdr; + p_header->assert_on_error = p_filter_cmd->assert_on_error; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc != 0) { + DP_ERR(p_hwfn, + "Unicast filter ADD command failed %d\n", + rc); + return rc; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", + (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" : + ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ? + "REMOVE" : + ((p_filter_cmd->opcode == QED_FILTER_MOVE) ? + "MOVE" : "REPLACE")), + (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" : + ((p_filter_cmd->type == QED_FILTER_VLAN) ? + "VLAN" : "MAC & VLAN"), + p_ramrod->filter_cmd_hdr.cmd_cnt, + p_filter_cmd->is_rx_filter, + p_filter_cmd->is_tx_filter); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", + p_filter_cmd->vport_to_add_to, + p_filter_cmd->vport_to_remove_from, + p_filter_cmd->mac[0], + p_filter_cmd->mac[1], + p_filter_cmd->mac[2], + p_filter_cmd->mac[3], + p_filter_cmd->mac[4], + p_filter_cmd->mac[5], + p_filter_cmd->vlan); + + return 0; +} + +/******************************************************************************* + * Description: + * Calculates crc 32 on a buffer + * Note: crc32_length MUST be aligned to 8 + * Return: + ******************************************************************************/ +static u32 qed_calc_crc32c(u8 *crc32_packet, + u32 crc32_length, + u32 crc32_seed, + u8 complement) +{ + u32 byte = 0; + u32 bit = 0; + u8 msb = 0; + u8 current_byte = 0; + u32 crc32_result = crc32_seed; + + if ((!crc32_packet) || + (crc32_length == 0) || + ((crc32_length % 8) != 0)) + return crc32_result; + for (byte = 0; byte < crc32_length; byte++) { + current_byte = crc32_packet[byte]; + for (bit = 0; bit < 8; bit++) { + msb = (u8)(crc32_result >> 31); + crc32_result = crc32_result << 1; + if (msb != (0x1 & (current_byte >> bit))) { + crc32_result = crc32_result ^ CRC32_POLY; + crc32_result |= 1; /*crc32_result[0] = 1;*/ + } + } + } + return crc32_result; +} + +static inline u32 qed_crc32c_le(u32 seed, + u8 *mac, + u32 len) +{ + u32 packet_buf[2] = { 0 }; + + memcpy((u8 *)(&packet_buf[0]), &mac[0], 6); + return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0); +} + +static u8 qed_mcast_bin_from_mac(u8 *mac) +{ + u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, + mac, ETH_ALEN); + + return crc & 0xff; +} + +static int +qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct vport_update_ramrod_data *p_ramrod = NULL; + struct qed_sp_init_request_params sp_params; + struct qed_spq_entry *p_ent = NULL; + u8 abs_vport_id = 0; + int rc, i; + + if (p_filter_cmd->opcode == 
QED_FILTER_ADD) { + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, + &abs_vport_id); + if (rc) + return rc; + } else { + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, + &abs_vport_id); + if (rc) + return rc; + } + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(*p_ramrod); + sp_params.comp_mode = comp_mode; + sp_params.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + qed_spq_get_cid(p_hwfn), + p_hwfn->hw_info.opaque_fid, + ETH_RAMROD_VPORT_UPDATE, + PROTOCOLID_ETH, + &sp_params); + + if (rc) { + DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); + return rc; + } + + p_ramrod = &p_ent->ramrod.vport_update; + p_ramrod->common.update_approx_mcast_flg = 1; + + /* explicitly clear out the entire vector */ + memset(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); + memset(bins, 0, sizeof(unsigned long) * + ETH_MULTICAST_MAC_BINS_IN_REGS); + /* filter ADD op is explicit set op and it removes + * any existing filters for the vport + */ + if (p_filter_cmd->opcode == QED_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { + u32 bit; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); + __set_bit(bit, bins); + } + + /* Convert to correct endianity */ + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + u32 *p_bins = (u32 *)bins; + struct vport_update_ramrod_mcast *approx_mcast; + + approx_mcast = &p_ramrod->approx_mcast; + approx_mcast->bins[i] = cpu_to_le32(p_bins[i]); + } + } + + p_ramrod->common.vport_id = abs_vport_id; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_filter_mcast_cmd(struct qed_dev *cdev, + struct qed_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + int rc = 0; + int i; + + /* only ADD and REMOVE operations are supported for multi-cast */ + if ((p_filter_cmd->opcode != QED_FILTER_ADD && + (p_filter_cmd->opcode != QED_FILTER_REMOVE)) || + (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS)) + return -EINVAL; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + u16 opaque_fid; + + if (rc != 0) + break; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + rc = qed_sp_eth_filter_mcast(p_hwfn, + opaque_fid, + p_filter_cmd, + comp_mode, + p_comp_data); + } + return rc; +} + +static int qed_filter_ucast_cmd(struct qed_dev *cdev, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + int rc = 0; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + u16 opaque_fid; + + if (rc != 0) + break; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + rc = qed_sp_eth_filter_ucast(p_hwfn, + opaque_fid, + p_filter_cmd, + comp_mode, + p_comp_data); + } + + return rc; +} + +static int qed_fill_eth_dev_info(struct qed_dev *cdev, + struct qed_dev_eth_info *info) +{ + int i; + + memset(info, 0, sizeof(*info)); + + info->num_tc = 1; + + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + for_each_hwfn(cdev, i) + info->num_queues += FEAT_NUM(&cdev->hwfns[i], + QED_PF_L2_QUE); + if (cdev->int_params.fp_msix_cnt) + info->num_queues = min_t(u8, info->num_queues, + cdev->int_params.fp_msix_cnt); + } else { + info->num_queues = cdev->num_hwfns; + } + + info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); + ether_addr_copy(info->port_mac, + cdev->hwfns[0].hw_info.hw_mac_addr); + + qed_fill_dev_info(cdev, &info->common); + + return 0; +} + +static void qed_register_eth_ops(struct qed_dev *cdev, 
+ struct qed_eth_cb_ops *ops, + void *cookie) +{ + cdev->protocol_ops.eth = ops; + cdev->ops_cookie = cookie; +} + +static int qed_start_vport(struct qed_dev *cdev, + u8 vport_id, + u16 mtu, + u8 drop_ttl0_flg, + u8 inner_vlan_removal_en_flg) +{ + int rc, i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + rc = qed_sp_vport_start(p_hwfn, + p_hwfn->hw_info.concrete_fid, + p_hwfn->hw_info.opaque_fid, + vport_id, + mtu, + drop_ttl0_flg, + inner_vlan_removal_en_flg); + + if (rc) { + DP_ERR(cdev, "Failed to start VPORT\n"); + return rc; + } + + qed_hw_start_fastpath(p_hwfn); + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Started V-PORT %d with MTU %d\n", + vport_id, mtu); + } + + qed_reset_vport_stats(cdev); + + return 0; +} + +static int qed_stop_vport(struct qed_dev *cdev, + u8 vport_id) +{ + int rc, i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + rc = qed_sp_vport_stop(p_hwfn, + p_hwfn->hw_info.opaque_fid, + vport_id); + + if (rc) { + DP_ERR(cdev, "Failed to stop VPORT\n"); + return rc; + } + } + return 0; +} + +static int qed_update_vport(struct qed_dev *cdev, + struct qed_update_vport_params *params) +{ + struct qed_sp_vport_update_params sp_params; + struct qed_rss_params sp_rss_params; + int rc, i; + + if (!cdev) + return -ENODEV; + + memset(&sp_params, 0, sizeof(sp_params)); + memset(&sp_rss_params, 0, sizeof(sp_rss_params)); + + /* Translate protocol params into sp params */ + sp_params.vport_id = params->vport_id; + sp_params.update_vport_active_rx_flg = + params->update_vport_active_flg; + sp_params.update_vport_active_tx_flg = + params->update_vport_active_flg; + sp_params.vport_active_rx_flg = params->vport_active_flg; + sp_params.vport_active_tx_flg = params->vport_active_flg; + + /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. + * We need to re-fix the rss values per engine for CMT. + */ + if (cdev->num_hwfns > 1 && params->update_rss_flg) { + struct qed_update_vport_rss_params *rss = + ¶ms->rss_params; + int k, max = 0; + + /* Find largest entry, since it's possible RSS needs to + * be disabled [in case only 1 queue per-hwfn] + */ + for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++) + max = (max > rss->rss_ind_table[k]) ? 
+ max : rss->rss_ind_table[k]; + + /* Either fix RSS values or disable RSS */ + if (cdev->num_hwfns < max + 1) { + int divisor = (max + cdev->num_hwfns - 1) / + cdev->num_hwfns; + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "CMT - fixing RSS values (modulo %02x)\n", + divisor); + + for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++) + rss->rss_ind_table[k] = + rss->rss_ind_table[k] % divisor; + } else { + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "CMT - 1 queue per-hwfn; Disabling RSS\n"); + params->update_rss_flg = 0; + } + } + + /* Now, update the RSS configuration for actual configuration */ + if (params->update_rss_flg) { + sp_rss_params.update_rss_config = 1; + sp_rss_params.rss_enable = 1; + sp_rss_params.update_rss_capabilities = 1; + sp_rss_params.update_rss_ind_table = 1; + sp_rss_params.update_rss_key = 1; + sp_rss_params.rss_caps = QED_RSS_IPV4 | + QED_RSS_IPV6 | + QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP; + sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */ + memcpy(sp_rss_params.rss_ind_table, + params->rss_params.rss_ind_table, + QED_RSS_IND_TABLE_SIZE * sizeof(u16)); + memcpy(sp_rss_params.rss_key, params->rss_params.rss_key, + QED_RSS_KEY_SIZE * sizeof(u32)); + } + sp_params.rss_params = &sp_rss_params; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = qed_sp_vport_update(p_hwfn, &sp_params, + QED_SPQ_MODE_EBLOCK, + NULL); + if (rc) { + DP_ERR(cdev, "Failed to update VPORT\n"); + return rc; + } + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Updated V-PORT %d: active_flag %d [update %d]\n", + params->vport_id, params->vport_active_flg, + params->update_vport_active_flg); + } + + return 0; +} + +static int qed_start_rxq(struct qed_dev *cdev, + struct qed_queue_start_common_params *params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void __iomem **pp_prod) +{ + int rc, hwfn_index; + struct qed_hwfn *p_hwfn; + + hwfn_index = params->rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + /* Fix queue ID in 100g mode */ + params->queue_id /= cdev->num_hwfns; + + rc = qed_sp_eth_rx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + params, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, + cqe_pbl_size, + pp_prod); + + if (rc) { + DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id); + return rc; + } + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n", + params->queue_id, params->rss_id, params->vport_id, + params->sb); + + return 0; +} + +static int qed_stop_rxq(struct qed_dev *cdev, + struct qed_stop_rxq_params *params) +{ + int rc, hwfn_index; + struct qed_hwfn *p_hwfn; + + hwfn_index = params->rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + rc = qed_sp_eth_rx_queue_stop(p_hwfn, + params->rx_queue_id / cdev->num_hwfns, + params->eq_completion_only, + false); + if (rc) { + DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id); + return rc; + } + + return 0; +} + +static int qed_start_txq(struct qed_dev *cdev, + struct qed_queue_start_common_params *p_params, + dma_addr_t pbl_addr, + u16 pbl_size, + void __iomem **pp_doorbell) +{ + struct qed_hwfn *p_hwfn; + int rc, hwfn_index; + + hwfn_index = p_params->rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + /* Fix queue ID in 100g mode */ + p_params->queue_id /= cdev->num_hwfns; + + rc = qed_sp_eth_tx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + 
p_params, + pbl_addr, + pbl_size, + pp_doorbell); + + if (rc) { + DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id); + return rc; + } + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n", + p_params->queue_id, p_params->rss_id, p_params->vport_id, + p_params->sb); + + return 0; +} + +#define QED_HW_STOP_RETRY_LIMIT (10) +static int qed_fastpath_stop(struct qed_dev *cdev) +{ + qed_hw_stop_fastpath(cdev); + + return 0; +} + +static int qed_stop_txq(struct qed_dev *cdev, + struct qed_stop_txq_params *params) +{ + struct qed_hwfn *p_hwfn; + int rc, hwfn_index; + + hwfn_index = params->rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + rc = qed_sp_eth_tx_queue_stop(p_hwfn, + params->tx_queue_id / cdev->num_hwfns); + if (rc) { + DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id); + return rc; + } + + return 0; +} + +static int qed_configure_filter_rx_mode(struct qed_dev *cdev, + enum qed_filter_rx_mode_type type) +{ + struct qed_filter_accept_flags accept_flags; + + memset(&accept_flags, 0, sizeof(accept_flags)); + + accept_flags.update_rx_mode_config = 1; + accept_flags.update_tx_mode_config = 1; + accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | + QED_ACCEPT_MCAST_MATCHED | + QED_ACCEPT_BCAST; + accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED | + QED_ACCEPT_MCAST_MATCHED | + QED_ACCEPT_BCAST; + + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) + accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | + QED_ACCEPT_MCAST_UNMATCHED; + else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) + accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + + return qed_filter_accept_cmd(cdev, 0, accept_flags, + QED_SPQ_MODE_CB, NULL); +} + +static int qed_configure_filter_ucast(struct qed_dev *cdev, + struct qed_filter_ucast_params *params) +{ + struct qed_filter_ucast ucast; + + if (!params->vlan_valid && !params->mac_valid) { + DP_NOTICE( + cdev, + "Tried configuring a unicast filter, but both MAC and VLAN are not set\n"); + return -EINVAL; + } + + memset(&ucast, 0, sizeof(ucast)); + switch (params->type) { + case QED_FILTER_XCAST_TYPE_ADD: + ucast.opcode = QED_FILTER_ADD; + break; + case QED_FILTER_XCAST_TYPE_DEL: + ucast.opcode = QED_FILTER_REMOVE; + break; + case QED_FILTER_XCAST_TYPE_REPLACE: + ucast.opcode = QED_FILTER_REPLACE; + break; + default: + DP_NOTICE(cdev, "Unknown unicast filter type %d\n", + params->type); + } + + if (params->vlan_valid && params->mac_valid) { + ucast.type = QED_FILTER_MAC_VLAN; + ether_addr_copy(ucast.mac, params->mac); + ucast.vlan = params->vlan; + } else if (params->mac_valid) { + ucast.type = QED_FILTER_MAC; + ether_addr_copy(ucast.mac, params->mac); + } else { + ucast.type = QED_FILTER_VLAN; + ucast.vlan = params->vlan; + } + + ucast.is_rx_filter = true; + ucast.is_tx_filter = true; + + return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL); +} + +static int qed_configure_filter_mcast(struct qed_dev *cdev, + struct qed_filter_mcast_params *params) +{ + struct qed_filter_mcast mcast; + int i; + + memset(&mcast, 0, sizeof(mcast)); + switch (params->type) { + case QED_FILTER_XCAST_TYPE_ADD: + mcast.opcode = QED_FILTER_ADD; + break; + case QED_FILTER_XCAST_TYPE_DEL: + mcast.opcode = QED_FILTER_REMOVE; + break; + default: + DP_NOTICE(cdev, "Unknown multicast filter type %d\n", + params->type); + } + + mcast.num_mc_addrs = params->num; + for (i = 0; i < mcast.num_mc_addrs; i++) + ether_addr_copy(mcast.mac[i], params->mac[i]); + + return 
qed_filter_mcast_cmd(cdev, &mcast, + QED_SPQ_MODE_CB, NULL); +} + +static int qed_configure_filter(struct qed_dev *cdev, + struct qed_filter_params *params) +{ + enum qed_filter_rx_mode_type accept_flags; + + switch (params->type) { + case QED_FILTER_TYPE_UCAST: + return qed_configure_filter_ucast(cdev, ¶ms->filter.ucast); + case QED_FILTER_TYPE_MCAST: + return qed_configure_filter_mcast(cdev, ¶ms->filter.mcast); + case QED_FILTER_TYPE_RX_MODE: + accept_flags = params->filter.accept_flags; + return qed_configure_filter_rx_mode(cdev, accept_flags); + default: + DP_NOTICE(cdev, "Unknown filter type %d\n", + (int)params->type); + return -EINVAL; + } +} + +static int qed_fp_cqe_completion(struct qed_dev *dev, + u8 rss_id, + struct eth_slow_path_rx_cqe *cqe) +{ + return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns], + cqe); +} + +static const struct qed_eth_ops qed_eth_ops_pass = { + .common = &qed_common_ops_pass, + .fill_dev_info = &qed_fill_eth_dev_info, + .register_ops = &qed_register_eth_ops, + .vport_start = &qed_start_vport, + .vport_stop = &qed_stop_vport, + .vport_update = &qed_update_vport, + .q_rx_start = &qed_start_rxq, + .q_rx_stop = &qed_stop_rxq, + .q_tx_start = &qed_start_txq, + .q_tx_stop = &qed_stop_txq, + .filter_config = &qed_configure_filter, + .fastpath_stop = &qed_fastpath_stop, + .eth_cqe_completion = &qed_fp_cqe_completion, + .get_vport_stats = &qed_get_vport_stats, +}; + +const struct qed_eth_ops *qed_get_eth_ops(u32 version) +{ + if (version != QED_ETH_INTERFACE_VERSION) { + pr_notice("Cannot supply ethtool operations [%08x != %08x]\n", + version, QED_ETH_INTERFACE_VERSION); + return NULL; + } + + return &qed_eth_ops_pass; +} +EXPORT_SYMBOL(qed_get_eth_ops); + +void qed_put_eth_ops(void) +{ + /* TODO - reference count for module? */ +} +EXPORT_SYMBOL(qed_put_eth_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c new file mode 100644 index 000000000000..947c7af72b25 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -0,0 +1,1169 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/stddef.h> +#include <linux/pci.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/version.h> +#include <linux/delay.h> +#include <asm/byteorder.h> +#include <linux/dma-mapping.h> +#include <linux/string.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/vmalloc.h> +#include <linux/qed/qed_if.h> + +#include "qed.h" +#include "qed_sp.h" +#include "qed_dev_api.h" +#include "qed_mcp.h" +#include "qed_hw.h" + +static const char version[] = + "QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n"; + +MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define FW_FILE_VERSION \ + __stringify(FW_MAJOR_VERSION) "." \ + __stringify(FW_MINOR_VERSION) "." \ + __stringify(FW_REVISION_VERSION) "." 
\ + __stringify(FW_ENGINEERING_VERSION) + +#define QED_FW_FILE_NAME \ + "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" + +static int __init qed_init(void) +{ + pr_notice("qed_init called\n"); + + pr_info("%s", version); + + return 0; +} + +static void __exit qed_cleanup(void) +{ + pr_notice("qed_cleanup called\n"); +} + +module_init(qed_init); +module_exit(qed_cleanup); + +/* Check if the DMA controller on the machine can properly handle the DMA + * addressing required by the device. +*/ +static int qed_set_coherency_mask(struct qed_dev *cdev) +{ + struct device *dev = &cdev->pdev->dev; + + if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { + if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { + DP_NOTICE(cdev, + "Can't request 64-bit consistent allocations\n"); + return -EIO; + } + } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { + DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n"); + return -EIO; + } + + return 0; +} + +static void qed_free_pci(struct qed_dev *cdev) +{ + struct pci_dev *pdev = cdev->pdev; + + if (cdev->doorbells) + iounmap(cdev->doorbells); + if (cdev->regview) + iounmap(cdev->regview); + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + + pci_disable_device(pdev); +} + +/* Performs PCI initializations as well as initializing PCI-related parameters + * in the device structrue. Returns 0 in case of success. + */ +static int qed_init_pci(struct qed_dev *cdev, + struct pci_dev *pdev) +{ + int rc; + + cdev->pdev = pdev; + + rc = pci_enable_device(pdev); + if (rc) { + DP_NOTICE(cdev, "Cannot enable PCI device\n"); + goto err0; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + DP_NOTICE(cdev, "No memory region found in bar #0\n"); + rc = -EIO; + goto err1; + } + + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + DP_NOTICE(cdev, "No memory region found in bar #2\n"); + rc = -EIO; + goto err1; + } + + if (atomic_read(&pdev->enable_cnt) == 1) { + rc = pci_request_regions(pdev, "qed"); + if (rc) { + DP_NOTICE(cdev, + "Failed to request PCI memory resources\n"); + goto err1; + } + pci_set_master(pdev); + pci_save_state(pdev); + } + + if (!pci_is_pcie(pdev)) { + DP_NOTICE(cdev, "The bus is not PCI Express\n"); + rc = -EIO; + goto err2; + } + + cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (cdev->pci_params.pm_cap == 0) + DP_NOTICE(cdev, "Cannot find power management capability\n"); + + rc = qed_set_coherency_mask(cdev); + if (rc) + goto err2; + + cdev->pci_params.mem_start = pci_resource_start(pdev, 0); + cdev->pci_params.mem_end = pci_resource_end(pdev, 0); + cdev->pci_params.irq = pdev->irq; + + cdev->regview = pci_ioremap_bar(pdev, 0); + if (!cdev->regview) { + DP_NOTICE(cdev, "Cannot map register space, aborting\n"); + rc = -ENOMEM; + goto err2; + } + + cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); + cdev->db_size = pci_resource_len(cdev->pdev, 2); + cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); + if (!cdev->doorbells) { + DP_NOTICE(cdev, "Cannot map doorbell space\n"); + return -ENOMEM; + } + + return 0; + +err2: + pci_release_regions(pdev); +err1: + pci_disable_device(pdev); +err0: + return rc; +} + +int qed_fill_dev_info(struct qed_dev *cdev, + struct qed_dev_info *dev_info) +{ + struct qed_ptt *ptt; + + memset(dev_info, 0, sizeof(struct qed_dev_info)); + + dev_info->num_hwfns = cdev->num_hwfns; + dev_info->pci_mem_start = cdev->pci_params.mem_start; + dev_info->pci_mem_end = cdev->pci_params.mem_end; + dev_info->pci_irq = cdev->pci_params.irq; + 
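+ /* Descriptive note (editor's comment, not part of the original patch): the remaining fields are filled from the leading hwfn - MF-mode indication and the permanent MAC address are read from hwfns[0], while the management-FW version and flash size are queried through the MCP mailbox. */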
dev_info->is_mf = IS_MF(&cdev->hwfns[0]); + ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); + + dev_info->fw_major = FW_MAJOR_VERSION; + dev_info->fw_minor = FW_MINOR_VERSION; + dev_info->fw_rev = FW_REVISION_VERSION; + dev_info->fw_eng = FW_ENGINEERING_VERSION; + dev_info->mf_mode = cdev->mf_mode; + + qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev); + + ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + if (ptt) { + qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, + &dev_info->flash_size); + + qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); + } + + return 0; +} + +static void qed_free_cdev(struct qed_dev *cdev) +{ + kfree((void *)cdev); +} + +static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) +{ + struct qed_dev *cdev; + + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (!cdev) + return cdev; + + qed_init_struct(cdev); + + return cdev; +} + +/* Sets the requested power state */ +static int qed_set_power_state(struct qed_dev *cdev, + pci_power_t state) +{ + if (!cdev) + return -ENODEV; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); + return 0; +} + +/* probing */ +static struct qed_dev *qed_probe(struct pci_dev *pdev, + enum qed_protocol protocol, + u32 dp_module, + u8 dp_level) +{ + struct qed_dev *cdev; + int rc; + + cdev = qed_alloc_cdev(pdev); + if (!cdev) + goto err0; + + cdev->protocol = protocol; + + qed_init_dp(cdev, dp_module, dp_level); + + rc = qed_init_pci(cdev, pdev); + if (rc) { + DP_ERR(cdev, "init pci failed\n"); + goto err1; + } + DP_INFO(cdev, "PCI init completed successfully\n"); + + rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); + if (rc) { + DP_ERR(cdev, "hw prepare failed\n"); + goto err2; + } + + DP_INFO(cdev, "qed_probe completed successffuly\n"); + + return cdev; + +err2: + qed_free_pci(cdev); +err1: + qed_free_cdev(cdev); +err0: + return NULL; +} + +static void qed_remove(struct qed_dev *cdev) +{ + if (!cdev) + return; + + qed_hw_remove(cdev); + + qed_free_pci(cdev); + + qed_set_power_state(cdev, PCI_D3hot); + + qed_free_cdev(cdev); +} + +static void qed_disable_msix(struct qed_dev *cdev) +{ + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + pci_disable_msix(cdev->pdev); + kfree(cdev->int_params.msix_table); + } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { + pci_disable_msi(cdev->pdev); + } + + memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); +} + +static int qed_enable_msix(struct qed_dev *cdev, + struct qed_int_params *int_params) +{ + int i, rc, cnt; + + cnt = int_params->in.num_vectors; + + for (i = 0; i < cnt; i++) + int_params->msix_table[i].entry = i; + + rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, + int_params->in.min_msix_cnt, cnt); + if (rc < cnt && rc >= int_params->in.min_msix_cnt && + (rc % cdev->num_hwfns)) { + pci_disable_msix(cdev->pdev); + + /* If fastpath is initialized, we need at least one interrupt + * per hwfn [and the slow path interrupts]. New requested number + * should be a multiple of the number of hwfns. 
+ */ + cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; + DP_NOTICE(cdev, + "Trying to enable MSI-X with less vectors (%d out of %d)\n", + cnt, int_params->in.num_vectors); + rc = pci_enable_msix_exact(cdev->pdev, + int_params->msix_table, cnt); + if (!rc) + rc = cnt; + } + + if (rc > 0) { + /* MSI-x configuration was achieved */ + int_params->out.int_mode = QED_INT_MODE_MSIX; + int_params->out.num_vectors = rc; + rc = 0; + } else { + DP_NOTICE(cdev, + "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", + cnt, rc); + } + + return rc; +} + +/* This function outputs the int mode and the number of enabled msix vector */ +static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) +{ + struct qed_int_params *int_params = &cdev->int_params; + struct msix_entry *tbl; + int rc = 0, cnt; + + switch (int_params->in.int_mode) { + case QED_INT_MODE_MSIX: + /* Allocate MSIX table */ + cnt = int_params->in.num_vectors; + int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); + if (!int_params->msix_table) { + rc = -ENOMEM; + goto out; + } + + /* Enable MSIX */ + rc = qed_enable_msix(cdev, int_params); + if (!rc) + goto out; + + DP_NOTICE(cdev, "Failed to enable MSI-X\n"); + kfree(int_params->msix_table); + if (force_mode) + goto out; + /* Fallthrough */ + + case QED_INT_MODE_MSI: + rc = pci_enable_msi(cdev->pdev); + if (!rc) { + int_params->out.int_mode = QED_INT_MODE_MSI; + goto out; + } + + DP_NOTICE(cdev, "Failed to enable MSI\n"); + if (force_mode) + goto out; + /* Fallthrough */ + + case QED_INT_MODE_INTA: + int_params->out.int_mode = QED_INT_MODE_INTA; + rc = 0; + goto out; + default: + DP_NOTICE(cdev, "Unknown int_mode value %d\n", + int_params->in.int_mode); + rc = -EINVAL; + } + +out: + cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; + + return rc; +} + +static void qed_simd_handler_config(struct qed_dev *cdev, void *token, + int index, void(*handler)(void *)) +{ + struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; + int relative_idx = index / cdev->num_hwfns; + + hwfn->simd_proto_handler[relative_idx].func = handler; + hwfn->simd_proto_handler[relative_idx].token = token; +} + +static void qed_simd_handler_clean(struct qed_dev *cdev, int index) +{ + struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; + int relative_idx = index / cdev->num_hwfns; + + memset(&hwfn->simd_proto_handler[relative_idx], 0, + sizeof(struct qed_simd_fp_handler)); +} + +static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) +{ + tasklet_schedule((struct tasklet_struct *)tasklet); + return IRQ_HANDLED; +} + +static irqreturn_t qed_single_int(int irq, void *dev_instance) +{ + struct qed_dev *cdev = (struct qed_dev *)dev_instance; + struct qed_hwfn *hwfn; + irqreturn_t rc = IRQ_NONE; + u64 status; + int i, j; + + for (i = 0; i < cdev->num_hwfns; i++) { + status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); + + if (!status) + continue; + + hwfn = &cdev->hwfns[i]; + + /* Slowpath interrupt */ + if (unlikely(status & 0x1)) { + tasklet_schedule(hwfn->sp_dpc); + status &= ~0x1; + rc = IRQ_HANDLED; + } + + /* Fastpath interrupts */ + for (j = 0; j < 64; j++) { + if ((0x2ULL << j) & status) { + hwfn->simd_proto_handler[j].func( + hwfn->simd_proto_handler[j].token); + status &= ~(0x2ULL << j); + rc = IRQ_HANDLED; + } + } + + if (unlikely(status)) + DP_VERBOSE(hwfn, NETIF_MSG_INTR, + "got an unknown interrupt status 0x%llx\n", + status); + } + + return rc; +} + +static int qed_slowpath_irq_req(struct qed_dev *cdev) +{ + int i = 0, rc = 0; + + if (cdev->int_params.out.int_mode == 
QED_INT_MODE_MSIX) { + /* Request all the slowpath MSI-X vectors */ + for (i = 0; i < cdev->num_hwfns; i++) { + snprintf(cdev->hwfns[i].name, NAME_SIZE, + "sp-%d-%02x:%02x.%02x", + i, cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), + cdev->hwfns[i].abs_pf_id); + + rc = request_irq(cdev->int_params.msix_table[i].vector, + qed_msix_sp_int, 0, + cdev->hwfns[i].name, + cdev->hwfns[i].sp_dpc); + if (rc) + break; + + DP_VERBOSE(&cdev->hwfns[i], + (NETIF_MSG_INTR | QED_MSG_SP), + "Requested slowpath MSI-X\n"); + } + + if (i != cdev->num_hwfns) { + /* Free already request MSI-X vectors */ + for (i--; i >= 0; i--) { + unsigned int vec = + cdev->int_params.msix_table[i].vector; + synchronize_irq(vec); + free_irq(cdev->int_params.msix_table[i].vector, + cdev->hwfns[i].sp_dpc); + } + } + } else { + unsigned long flags = 0; + + snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", + cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), + PCI_FUNC(cdev->pdev->devfn)); + + if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) + flags |= IRQF_SHARED; + + rc = request_irq(cdev->pdev->irq, qed_single_int, + flags, cdev->name, cdev); + } + + return rc; +} + +static void qed_slowpath_irq_free(struct qed_dev *cdev) +{ + int i; + + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + for_each_hwfn(cdev, i) { + synchronize_irq(cdev->int_params.msix_table[i].vector); + free_irq(cdev->int_params.msix_table[i].vector, + cdev->hwfns[i].sp_dpc); + } + } else { + free_irq(cdev->pdev->irq, cdev); + } +} + +static int qed_nic_stop(struct qed_dev *cdev) +{ + int i, rc; + + rc = qed_hw_stop(cdev); + + for (i = 0; i < cdev->num_hwfns; i++) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + if (p_hwfn->b_sp_dpc_enabled) { + tasklet_disable(p_hwfn->sp_dpc); + p_hwfn->b_sp_dpc_enabled = false; + DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, + "Disabled sp taskelt [hwfn %d] at %p\n", + i, p_hwfn->sp_dpc); + } + } + + return rc; +} + +static int qed_nic_reset(struct qed_dev *cdev) +{ + int rc; + + rc = qed_hw_reset(cdev); + if (rc) + return rc; + + qed_resc_free(cdev); + + return 0; +} + +static int qed_nic_setup(struct qed_dev *cdev) +{ + int rc; + + rc = qed_resc_alloc(cdev); + if (rc) + return rc; + + DP_INFO(cdev, "Allocated qed resources\n"); + + qed_resc_setup(cdev); + + return rc; +} + +static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) +{ + int limit = 0; + + /* Mark the fastpath as free/used */ + cdev->int_params.fp_initialized = cnt ? true : false; + + if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) + limit = cdev->num_hwfns * 63; + else if (cdev->int_params.fp_msix_cnt) + limit = cdev->int_params.fp_msix_cnt; + + if (!limit) + return -ENOMEM; + + return min_t(int, cnt, limit); +} + +static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) +{ + memset(info, 0, sizeof(struct qed_int_info)); + + if (!cdev->int_params.fp_initialized) { + DP_INFO(cdev, + "Protocol driver requested interrupt information, but its support is not yet configured\n"); + return -EINVAL; + } + + /* Need to expose only MSI-X information; Single IRQ is handled solely + * by qed. 
+ */ + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + int msix_base = cdev->int_params.fp_msix_base; + + info->msix_cnt = cdev->int_params.fp_msix_cnt; + info->msix = &cdev->int_params.msix_table[msix_base]; + } + + return 0; +} + +static int qed_slowpath_setup_int(struct qed_dev *cdev, + enum qed_int_mode int_mode) +{ + int rc, i; + u8 num_vectors = 0; + + memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); + + cdev->int_params.in.int_mode = int_mode; + for_each_hwfn(cdev, i) + num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1; + cdev->int_params.in.num_vectors = num_vectors; + + /* We want a minimum of one slowpath and one fastpath vector per hwfn */ + cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; + + rc = qed_set_int_mode(cdev, false); + if (rc) { + DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); + return rc; + } + + cdev->int_params.fp_msix_base = cdev->num_hwfns; + cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - + cdev->num_hwfns; + + return 0; +} + +u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, + u8 *input_buf, u32 max_size, u8 *unzip_buf) +{ + int rc; + + p_hwfn->stream->next_in = input_buf; + p_hwfn->stream->avail_in = input_len; + p_hwfn->stream->next_out = unzip_buf; + p_hwfn->stream->avail_out = max_size; + + rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); + + if (rc != Z_OK) { + DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", + rc); + return 0; + } + + rc = zlib_inflate(p_hwfn->stream, Z_FINISH); + zlib_inflateEnd(p_hwfn->stream); + + if (rc != Z_OK && rc != Z_STREAM_END) { + DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", + p_hwfn->stream->msg, rc); + return 0; + } + + return p_hwfn->stream->total_out / 4; +} + +static int qed_alloc_stream_mem(struct qed_dev *cdev) +{ + int i; + void *workspace; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); + if (!p_hwfn->stream) + return -ENOMEM; + + workspace = vzalloc(zlib_inflate_workspacesize()); + if (!workspace) + return -ENOMEM; + p_hwfn->stream->workspace = workspace; + } + + return 0; +} + +static void qed_free_stream_mem(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + if (!p_hwfn->stream) + return; + + vfree(p_hwfn->stream->workspace); + kfree(p_hwfn->stream); + } +} + +static void qed_update_pf_params(struct qed_dev *cdev, + struct qed_pf_params *params) +{ + int i; + + for (i = 0; i < cdev->num_hwfns; i++) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + p_hwfn->pf_params = *params; + } +} + +static int qed_slowpath_start(struct qed_dev *cdev, + struct qed_slowpath_params *params) +{ + struct qed_mcp_drv_version drv_version; + const u8 *data = NULL; + struct qed_hwfn *hwfn; + int rc; + + rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, + &cdev->pdev->dev); + if (rc) { + DP_NOTICE(cdev, + "Failed to find fw file - /lib/firmware/%s\n", + QED_FW_FILE_NAME); + goto err; + } + + rc = qed_nic_setup(cdev); + if (rc) + goto err; + + rc = qed_slowpath_setup_int(cdev, params->int_mode); + if (rc) + goto err1; + + /* Request the slowpath IRQ */ + rc = qed_slowpath_irq_req(cdev); + if (rc) + goto err2; + + /* Allocate stream for unzipping */ + rc = qed_alloc_stream_mem(cdev); + if (rc) { + DP_NOTICE(cdev, "Failed to allocate stream memory\n"); + goto err3; + } + + /* Start the slowpath */ + data = cdev->firmware->data; + + rc = qed_hw_init(cdev, true, 
cdev->int_params.out.int_mode, + true, data); + if (rc) + goto err3; + + DP_INFO(cdev, + "HW initialization and function start completed successfully\n"); + + hwfn = QED_LEADING_HWFN(cdev); + drv_version.version = (params->drv_major << 24) | + (params->drv_minor << 16) | + (params->drv_rev << 8) | + (params->drv_eng); + strlcpy(drv_version.name, params->name, + MCP_DRV_VER_STR_SIZE - 4); + rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, + &drv_version); + if (rc) { + DP_NOTICE(cdev, "Failed sending drv version command\n"); + return rc; + } + + return 0; + +err3: + qed_free_stream_mem(cdev); + qed_slowpath_irq_free(cdev); +err2: + qed_disable_msix(cdev); +err1: + qed_resc_free(cdev); +err: + release_firmware(cdev->firmware); + + return rc; +} + +static int qed_slowpath_stop(struct qed_dev *cdev) +{ + if (!cdev) + return -ENODEV; + + qed_free_stream_mem(cdev); + + qed_nic_stop(cdev); + qed_slowpath_irq_free(cdev); + + qed_disable_msix(cdev); + qed_nic_reset(cdev); + + release_firmware(cdev->firmware); + + return 0; +} + +static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE], + char ver_str[VER_SIZE]) +{ + int i; + + memcpy(cdev->name, name, NAME_SIZE); + for_each_hwfn(cdev, i) + snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); + + memcpy(cdev->ver_str, ver_str, VER_SIZE); + cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; +} + +static u32 qed_sb_init(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, u16 sb_id, + enum qed_sb_type type) +{ + struct qed_hwfn *p_hwfn; + int hwfn_index; + u16 rel_sb_id; + u8 n_hwfns; + u32 rc; + + /* RoCE uses single engine and CMT uses two engines. When using both + * we force only a single engine. Storage uses only engine 0 too. + */ + if (type == QED_SB_TYPE_L2_QUEUE) + n_hwfns = cdev->num_hwfns; + else + n_hwfns = 1; + + hwfn_index = sb_id % n_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + rel_sb_id = sb_id / n_hwfns; + + DP_VERBOSE(cdev, NETIF_MSG_INTR, + "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", + hwfn_index, rel_sb_id, sb_id); + + rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, + sb_virt_addr, sb_phy_addr, rel_sb_id); + + return rc; +} + +static u32 qed_sb_release(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + u16 sb_id) +{ + struct qed_hwfn *p_hwfn; + int hwfn_index; + u16 rel_sb_id; + u32 rc; + + hwfn_index = sb_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + rel_sb_id = sb_id / cdev->num_hwfns; + + DP_VERBOSE(cdev, NETIF_MSG_INTR, + "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", + hwfn_index, rel_sb_id, sb_id); + + rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); + + return rc; +} + +static int qed_set_link(struct qed_dev *cdev, + struct qed_link_params *params) +{ + struct qed_hwfn *hwfn; + struct qed_mcp_link_params *link_params; + struct qed_ptt *ptt; + int rc; + + if (!cdev) + return -ENODEV; + + /* The link should be set only once per PF */ + hwfn = &cdev->hwfns[0]; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + link_params = qed_mcp_get_link_params(hwfn); + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) + link_params->speed.autoneg = params->autoneg; + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { + link_params->speed.advertised_speeds = 0; + if ((params->adv_speeds & SUPPORTED_1000baseT_Half) || + (params->adv_speeds & SUPPORTED_1000baseT_Full)) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + if (params->adv_speeds & 
SUPPORTED_10000baseKR_Full) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + if (params->adv_speeds & SUPPORTED_40000baseLR4_Full) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + if (params->adv_speeds & 0) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; + if (params->adv_speeds & 0) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G; + } + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) + link_params->speed.forced_speed = params->forced_speed; + + rc = qed_mcp_set_link(hwfn, ptt, params->link_up); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_get_port_type(u32 media_type) +{ + int port_type; + + switch (media_type) { + case MEDIA_SFPP_10G_FIBER: + case MEDIA_SFP_1G_FIBER: + case MEDIA_XFP_FIBER: + case MEDIA_KR: + port_type = PORT_FIBRE; + break; + case MEDIA_DA_TWINAX: + port_type = PORT_DA; + break; + case MEDIA_BASE_T: + port_type = PORT_TP; + break; + case MEDIA_NOT_PRESENT: + port_type = PORT_NONE; + break; + case MEDIA_UNSPECIFIED: + default: + port_type = PORT_OTHER; + break; + } + return port_type; +} + +static void qed_fill_link(struct qed_hwfn *hwfn, + struct qed_link_output *if_link) +{ + struct qed_mcp_link_params params; + struct qed_mcp_link_state link; + struct qed_mcp_link_capabilities link_caps; + u32 media_type; + + memset(if_link, 0, sizeof(*if_link)); + + /* Prepare source inputs */ + memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); + memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); + memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), + sizeof(link_caps)); + + /* Set the link parameters to pass to protocol driver */ + if (link.link_up) + if_link->link_up = true; + + /* TODO - at the moment assume supported and advertised speed equal */ + if_link->supported_caps = SUPPORTED_FIBRE; + if (params.speed.autoneg) + if_link->supported_caps |= SUPPORTED_Autoneg; + if (params.pause.autoneg || + (params.pause.forced_rx && params.pause.forced_tx)) + if_link->supported_caps |= SUPPORTED_Asym_Pause; + if (params.pause.autoneg || params.pause.forced_rx || + params.pause.forced_tx) + if_link->supported_caps |= SUPPORTED_Pause; + + if_link->advertised_caps = if_link->supported_caps; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + if_link->advertised_caps |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + if_link->advertised_caps |= SUPPORTED_10000baseKR_Full; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + if_link->advertised_caps |= 0; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) + if_link->advertised_caps |= 0; + + if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + if_link->supported_caps |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + if_link->supported_caps |= SUPPORTED_10000baseKR_Full; + if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + if_link->supported_caps |= SUPPORTED_40000baseLR4_Full; + if (link_caps.speed_capabilities & + 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + if_link->supported_caps |= 0; + if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) + if_link->supported_caps |= 0; + + if (link.link_up) + if_link->speed = link.speed; + + /* TODO - fill duplex properly */ + if_link->duplex = DUPLEX_FULL; + qed_mcp_get_media_type(hwfn->cdev, &media_type); + if_link->port = qed_get_port_type(media_type); + + if_link->autoneg = params.speed.autoneg; + + if (params.pause.autoneg) + if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; + if (params.pause.forced_rx) + if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; + if (params.pause.forced_tx) + if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; + + /* Link partner capabilities */ + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_1G_HD) + if_link->lp_caps |= SUPPORTED_1000baseT_Half; + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_1G_FD) + if_link->lp_caps |= SUPPORTED_1000baseT_Full; + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_10G) + if_link->lp_caps |= SUPPORTED_10000baseKR_Full; + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_40G) + if_link->lp_caps |= SUPPORTED_40000baseLR4_Full; + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_50G) + if_link->lp_caps |= 0; + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_100G) + if_link->lp_caps |= 0; + + if (link.an_complete) + if_link->lp_caps |= SUPPORTED_Autoneg; + + if (link.partner_adv_pause) + if_link->lp_caps |= SUPPORTED_Pause; + if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || + link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) + if_link->lp_caps |= SUPPORTED_Asym_Pause; +} + +static void qed_get_current_link(struct qed_dev *cdev, + struct qed_link_output *if_link) +{ + qed_fill_link(&cdev->hwfns[0], if_link); +} + +void qed_link_update(struct qed_hwfn *hwfn) +{ + void *cookie = hwfn->cdev->ops_cookie; + struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; + struct qed_link_output if_link; + + qed_fill_link(hwfn, &if_link); + + if (IS_LEAD_HWFN(hwfn) && cookie) + op->link_update(cookie, &if_link); +} + +static int qed_drain(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; + int i, rc; + + for_each_hwfn(cdev, i) { + hwfn = &cdev->hwfns[i]; + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); + return -EBUSY; + } + rc = qed_mcp_drain(hwfn, ptt); + if (rc) + return rc; + qed_ptt_release(hwfn, ptt); + } + + return 0; +} + +const struct qed_common_ops qed_common_ops_pass = { + .probe = &qed_probe, + .remove = &qed_remove, + .set_power_state = &qed_set_power_state, + .set_id = &qed_set_id, + .update_pf_params = &qed_update_pf_params, + .slowpath_start = &qed_slowpath_start, + .slowpath_stop = &qed_slowpath_stop, + .set_fp_int = &qed_set_int_fp, + .get_fp_int = &qed_get_int_fp, + .sb_init = &qed_sb_init, + .sb_release = &qed_sb_release, + .simd_handler_config = &qed_simd_handler_config, + .simd_handler_clean = &qed_simd_handler_clean, + .set_link = &qed_set_link, + .get_link = &qed_get_current_link, + .drain = &qed_drain, + .update_msglvl = &qed_init_dp, + .chain_alloc = &qed_chain_alloc, + .chain_free = &qed_chain_free, +}; + +u32 qed_get_protocol_version(enum qed_protocol protocol) +{ + switch (protocol) { + case QED_PROTOCOL_ETH: + return QED_ETH_INTERFACE_VERSION; + default: + return 0; + } +} +EXPORT_SYMBOL(qed_get_protocol_version); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c new 
file mode 100644 index 000000000000..20d048cdcb88 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -0,0 +1,860 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#define CHIP_MCP_RESP_ITER_US 10 + +#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ +#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ + +#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ + qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ + _val) + +#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ + qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) + +#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ + DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ + offsetof(struct public_drv_mb, _field), _val) + +#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \ + DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \ + offsetof(struct public_drv_mb, _field)) + +#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ + DRV_ID_PDA_COMP_VER_SHIFT) + +#define MCP_BYTES_PER_MBIT_SHIFT 17 + +bool qed_mcp_is_init(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) + return false; + return true; +} + +void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PORT); + u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr); + + p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize, + MFW_PORT(p_hwfn)); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "port_addr = 0x%x, port_id 0x%02x\n", + p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); +} + +void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); + u32 tmp, i; + + if (!p_hwfn->mcp_info->public_base) + return; + + for (i = 0; i < length; i++) { + tmp = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->mfw_mb_addr + + (i << 2) + sizeof(u32)); + + /* The MB data is actually BE; Need to force it to cpu */ + ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] = + be32_to_cpu((__force __be32)tmp); + } +} + +int qed_mcp_free(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->mcp_info) { + kfree(p_hwfn->mcp_info->mfw_mb_cur); + kfree(p_hwfn->mcp_info->mfw_mb_shadow); + } + kfree(p_hwfn->mcp_info); + + return 0; +} + +static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_mcp_info *p_info = p_hwfn->mcp_info; + u32 drv_mb_offsize, mfw_mb_offsize; + u32 mcp_pf_id = MCP_PF_ID(p_hwfn); + + p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); + if (!p_info->public_base) + return 0; + + p_info->public_base |= GRCBASE_MCP; + + /* Calculate the driver and MFW mailbox address */ + drv_mb_offsize = qed_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_DRV_MB)); + p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", + drv_mb_offsize, 
p_info->drv_mb_addr, mcp_pf_id); + + /* Set the MFW MB address */ + mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_MFW_MB)); + p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); + p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); + + /* Get the current driver mailbox sequence before sending + * the first command + */ + p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; + + /* Get current FW pulse sequence */ + p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) & + DRV_PULSE_SEQ_MASK; + + p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); + + return 0; +} + +int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_mcp_info *p_info; + u32 size; + + /* Allocate mcp_info structure */ + p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC); + if (!p_hwfn->mcp_info) + goto err; + p_info = p_hwfn->mcp_info; + + if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) { + DP_NOTICE(p_hwfn, "MCP is not initialized\n"); + /* Do not free mcp_info here, since public_base indicate that + * the MCP is not initialized + */ + return 0; + } + + size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); + p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC); + p_info->mfw_mb_shadow = + kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS( + p_info->mfw_mb_length), GFP_ATOMIC); + if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) + goto err; + + /* Initialize the MFW mutex */ + mutex_init(&p_info->mutex); + + return 0; + +err: + DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n"); + qed_mcp_free(p_hwfn); + return -ENOMEM; +} + +int qed_mcp_reset(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 seq = ++p_hwfn->mcp_info->drv_mb_seq; + u8 delay = CHIP_MCP_RESP_ITER_US; + u32 org_mcp_reset_seq, cnt = 0; + int rc = 0; + + /* Set drv command along with the updated sequence */ + org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, + (DRV_MSG_CODE_MCP_RESET | seq)); + + do { + /* Wait for MFW response */ + udelay(delay); + /* Give the FW up to 500 second (50*1000*10usec) */ + } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt, + MISCS_REG_GENERIC_POR_0)) && + (cnt++ < QED_MCP_RESET_RETRIES)); + + if (org_mcp_reset_seq != + qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "MCP was reset after %d usec\n", cnt * delay); + } else { + DP_ERR(p_hwfn, "Failed to reset MCP\n"); + rc = -EAGAIN; + } + + return rc; +} + +static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param) +{ + u8 delay = CHIP_MCP_RESP_ITER_US; + u32 seq, cnt = 1, actual_mb_seq; + int rc = 0; + + /* Get actual driver mailbox sequence */ + actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; + + /* Use MCP history register to check if MCP reset occurred between + * init time and now. 
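Editor's note: the polling budgets in qed_mcp_reset() and qed_do_mcp_cmd() follow from the constants at the top of this file; the "500 second" wording in the reset-loop comment appears to be a typo for 500 msec. A stand-alone sketch of the arithmetic (ordinary user-space C, not part of the patch), using only the constant values shown above:

#include <stdio.h>

/* Values copied from the defines near the top of qed_mcp.c. */
#define CHIP_MCP_RESP_ITER_US   10              /* delay per poll iteration, usec */
#define QED_MCP_RESET_RETRIES   (50 * 1000)     /* iterations in the reset loop    */
#define QED_DRV_MB_MAX_RETRIES  (500 * 1000)    /* iterations in the command loop  */

int main(void)
{
	unsigned long reset_us = (unsigned long)QED_MCP_RESET_RETRIES * CHIP_MCP_RESP_ITER_US;
	unsigned long cmd_us = (unsigned long)QED_DRV_MB_MAX_RETRIES * CHIP_MCP_RESP_ITER_US;

	printf("MCP reset budget:   %lu usec (~%lu msec)\n", reset_us, reset_us / 1000);
	printf("Mailbox cmd budget: %lu usec (~%lu sec)\n", cmd_us, cmd_us / 1000000);
	return 0;
}

This prints roughly 500 msec for the reset loop and 5 sec for the mailbox command loop, matching the "Account for ..." comments on the defines.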
+ */ + if (p_hwfn->mcp_info->mcp_hist != + qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n"); + qed_load_mcp_offsets(p_hwfn, p_ptt); + qed_mcp_cmd_port_init(p_hwfn, p_ptt); + } + seq = ++p_hwfn->mcp_info->drv_mb_seq; + + /* Set drv param */ + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param); + + /* Set drv command along with the updated sequence */ + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq)); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "wrote command (%x) to MFW MB param 0x%08x\n", + (cmd | seq), param); + + do { + /* Wait for MFW response */ + udelay(delay); + *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); + + /* Give the FW up to 5 second (500*10ms) */ + } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) && + (cnt++ < QED_DRV_MB_MAX_RETRIES)); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "[after %d ms] read (%x) seq is (%x) from FW MB\n", + cnt * delay, *o_mcp_resp, seq); + + /* Is this a reply to our command? */ + if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) { + *o_mcp_resp &= FW_MSG_CODE_MASK; + /* Get the MCP param */ + *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); + } else { + /* FW BUG! */ + DP_ERR(p_hwfn, "MFW failed to respond!\n"); + *o_mcp_resp = 0; + rc = -EAGAIN; + } + return rc; +} + +int qed_mcp_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param) +{ + int rc = 0; + + /* MCP not initialized */ + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + return -EBUSY; + } + + /* Lock Mutex to ensure only single thread is + * accessing the MCP at one time + */ + mutex_lock(&p_hwfn->mcp_info->mutex); + rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, + o_mcp_resp, o_mcp_param); + /* Release Mutex */ + mutex_unlock(&p_hwfn->mcp_info->mutex); + + return rc; +} + +static void qed_mcp_set_drv_ver(struct qed_dev *cdev, + struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 i; + + /* Copy version string to MCP */ + for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++) + DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i], + *(u32 *)&cdev->ver_str[i * sizeof(u32)]); +} + +int qed_mcp_load_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_load_code) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u32 param; + int rc; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + return -EBUSY; + } + + /* Save driver's version to shmem */ + qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n", + p_hwfn->mcp_info->drv_mb_seq, + p_hwfn->mcp_info->drv_pulse_seq); + + /* Load Request */ + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ, + (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT | + cdev->drv_type), + p_load_code, ¶m); + + /* if mcp fails to respond we must abort */ + if (rc) { + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } + + /* If MFW refused (e.g. other port is in diagnostic mode) we + * must abort. This can happen in the following cases: + * - Other port is in diagnostic mode + * - Previously loaded function on the engine is not compliant with + * the requester. + * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION. 
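For readers unfamiliar with the mailbox convention implemented by qed_do_mcp_cmd() above, here is a minimal, self-contained sketch of the handshake: write the command OR'ed with a fresh sequence number, then poll the firmware header until its sequence bits echo that number. The mbox struct, fake_firmware() and the mask values are invented scaffolding for illustration; only the pattern itself comes from the code above.

#include <stdio.h>
#include <stdint.h>

#define SEQ_MASK  0x0000ffffu	/* stands in for DRV_MSG_SEQ_NUMBER_MASK / FW_MSG_SEQ_NUMBER_MASK */
#define CODE_MASK 0xffff0000u	/* stands in for FW_MSG_CODE_MASK */

struct mbox {			/* pretend shared-memory mailbox */
	uint32_t drv_mb_header;
	uint32_t fw_mb_header;
	uint32_t fw_mb_param;
};

/* Pretend firmware: echoes the driver's sequence and returns a response. */
static void fake_firmware(struct mbox *mb)
{
	mb->fw_mb_param = 0xcafe;
	mb->fw_mb_header = 0x00010000u | (mb->drv_mb_header & SEQ_MASK);
}

static int mcp_cmd(struct mbox *mb, uint32_t cmd, uint16_t *seq,
		   uint32_t *resp, uint32_t *param)
{
	uint32_t hdr = cmd | ++(*seq);	/* command plus new sequence number */
	int retries = 5;

	mb->drv_mb_header = hdr;
	fake_firmware(mb);		/* the real MFW answers asynchronously */

	while (retries--) {
		*resp = mb->fw_mb_header;
		if ((*resp & SEQ_MASK) == (hdr & SEQ_MASK)) {
			*resp &= CODE_MASK;	/* strip the echoed sequence */
			*param = mb->fw_mb_param;
			return 0;
		}
	}
	return -1;			/* firmware never echoed our sequence */
}

int main(void)
{
	struct mbox mb = { 0 };
	uint16_t seq = 0;
	uint32_t resp, param;

	if (!mcp_cmd(&mb, 0x00100000u, &seq, &resp, &param))
		printf("resp 0x%08x param 0x%08x\n", resp, param);
	return 0;
}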
+ * - + */ + if (!(*p_load_code) || + ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) || + ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) || + ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) { + DP_ERR(p_hwfn, "MCP refused load request, aborting\n"); + return -EBUSY; + } + + return 0; +} + +static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_reset) +{ + struct qed_mcp_link_state *p_link; + u32 status = 0; + + p_link = &p_hwfn->mcp_info->link_output; + memset(p_link, 0, sizeof(*p_link)); + if (!b_reset) { + status = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, link_status)); + DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP), + "Received link update [0x%08x] from mfw [Addr 0x%x]\n", + status, + (u32)(p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + link_status))); + } else { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Resetting link indications\n"); + return; + } + + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + + p_link->full_duplex = true; + switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { + case LINK_STATUS_SPEED_AND_DUPLEX_100G: + p_link->speed = 100000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_50G: + p_link->speed = 50000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_40G: + p_link->speed = 40000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_25G: + p_link->speed = 25000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_20G: + p_link->speed = 20000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_10G: + p_link->speed = 10000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: + p_link->full_duplex = false; + /* Fall-through */ + case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: + p_link->speed = 1000; + break; + default: + p_link->speed = 0; + } + + /* Correct speed according to bandwidth allocation */ + if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) { + p_link->speed = p_link->speed * + p_hwfn->mcp_info->func_info.bandwidth_max / + 100; + qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_link->speed); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configured MAX bandwidth to be %08x Mb/sec\n", + p_link->speed); + } + + p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); + p_link->an_complete = !!(status & + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); + p_link->parallel_detection = !!(status & + LINK_STATUS_PARALLEL_DETECTION_USED); + p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED); + + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ? + QED_LINK_PARTNER_SPEED_1G_FD : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ? + QED_LINK_PARTNER_SPEED_1G_HD : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_10G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_20G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_40G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_50G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ? 
+ QED_LINK_PARTNER_SPEED_100G : 0; + + p_link->partner_tx_flow_ctrl_en = + !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED); + p_link->partner_rx_flow_ctrl_en = + !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED); + + switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) { + case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE: + p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE; + break; + case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE: + p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE; + break; + case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE: + p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE; + break; + default: + p_link->partner_adv_pause = 0; + } + + p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); + + qed_link_update(p_hwfn); +} + +int qed_mcp_set_link(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_up) +{ + struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; + u32 param = 0, reply = 0, cmd; + struct pmm_phy_cfg phy_cfg; + int rc = 0; + u32 i; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + return -EBUSY; + } + + /* Set the shmem configuration according to params */ + memset(&phy_cfg, 0, sizeof(phy_cfg)); + cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; + if (!params->speed.autoneg) + phy_cfg.speed = params->speed.forced_speed; + phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; + phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; + phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0; + phy_cfg.adv_speed = params->speed.advertised_speeds; + phy_cfg.loopback_mode = params->loopback_mode; + + /* Write the requested configuration to shmem */ + for (i = 0; i < sizeof(phy_cfg); i += 4) + qed_wr(p_hwfn, p_ptt, + p_hwfn->mcp_info->drv_mb_addr + + offsetof(struct public_drv_mb, union_data) + i, + ((u32 *)&phy_cfg)[i >> 2]); + + if (b_up) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", + phy_cfg.speed, + phy_cfg.pause, + phy_cfg.adv_speed, + phy_cfg.loopback_mode, + phy_cfg.feature_config_flags); + } else { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Resetting link\n"); + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n", + p_hwfn->mcp_info->drv_mb_seq, + p_hwfn->mcp_info->drv_pulse_seq); + + /* Load Request */ + rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, ¶m); + + /* if mcp fails to respond we must abort */ + if (rc) { + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } + + /* Reset the link status if needed */ + if (!b_up) + qed_mcp_handle_link_change(p_hwfn, p_ptt, true); + + return 0; +} + +int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_mcp_info *info = p_hwfn->mcp_info; + int rc = 0; + bool found = false; + u16 i; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n"); + + /* Read Messages from MFW */ + qed_mcp_read_mb(p_hwfn, p_ptt); + + /* Compare current messages to old ones */ + for (i = 0; i < info->mfw_mb_length; i++) { + if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) + continue; + + found = true; + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", + i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); + + switch (i) { + case MFW_DRV_MSG_LINK_CHANGE: + qed_mcp_handle_link_change(p_hwfn, p_ptt, false); + break; + default: + DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i); + rc = -EINVAL; + } + } 
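Looking back at qed_mcp_handle_link_change() above: after the status word is decoded into a speed, that speed is capped by the function's bandwidth_max percentage. A tiny stand-alone illustration of the scaling, with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t speed = 40000;		/* MFW-reported link speed, Mb/s (example) */
	uint8_t bandwidth_max = 75;	/* per-PF maximum bandwidth, percent (example) */

	/* Same arithmetic as the "Correct speed according to bandwidth
	 * allocation" block in qed_mcp_handle_link_change().
	 */
	if (bandwidth_max && speed)
		speed = speed * bandwidth_max / 100;

	printf("effective speed: %u Mb/s\n", speed);	/* prints 30000 */
	return 0;
}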
+ + /* ACK everything */ + for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { + __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]); + + /* MFW expect answer in BE, so we force write in that format */ + qed_wr(p_hwfn, p_ptt, + info->mfw_mb_addr + sizeof(u32) + + MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * + sizeof(u32) + i * sizeof(u32), + (__force u32)val); + } + + if (!found) { + DP_NOTICE(p_hwfn, + "Received an MFW message indication but no new message!\n"); + rc = -EINVAL; + } + + /* Copy the new mfw messages into the shadow */ + memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); + + return rc; +} + +int qed_mcp_get_mfw_ver(struct qed_dev *cdev, + u32 *p_mfw_ver) +{ + struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; + struct qed_ptt *p_ptt; + u32 global_offsize; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + global_offsize = qed_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> + public_base, + PUBLIC_GLOBAL)); + *p_mfw_ver = qed_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, 0) + + offsetof(struct public_global, mfw_ver)); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +int qed_mcp_get_media_type(struct qed_dev *cdev, + u32 *p_media_type) +{ + struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; + struct qed_ptt *p_ptt; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + return -EBUSY; + } + + *p_media_type = MEDIA_UNSPECIFIED; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, media_type)); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct public_func *p_data, + int pfid) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); + u32 i, size; + + memset(p_data, 0, sizeof(*p_data)); + + size = min_t(u32, sizeof(*p_data), + QED_SECTION_SIZE(mfw_path_offsize)); + for (i = 0; i < size / sizeof(u32); i++) + ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, + func_addr + (i << 2)); + + return size; +} + +static int +qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, + struct public_func *p_info, + enum qed_pci_personality *p_proto) +{ + int rc = 0; + + switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { + case FUNC_MF_CFG_PROTOCOL_ETHERNET: + *p_proto = QED_PCI_ETH; + break; + default: + rc = -EINVAL; + } + + return rc; +} + +int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_mcp_function_info *info; + struct public_func shmem_info; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + info = &p_hwfn->mcp_info->func_info; + + info->pause_on_host = (shmem_info.config & + FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0; + + if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, + &info->protocol)) { + DP_ERR(p_hwfn, "Unknown personality %08x\n", + (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); + return -EINVAL; + } + + if (p_hwfn->cdev->mf_mode != SF) { + info->bandwidth_min = (shmem_info.config & + FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT; + if (info->bandwidth_min < 1 || info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. 
Set to 1\n", + info->bandwidth_min); + info->bandwidth_min = 1; + } + + info->bandwidth_max = (shmem_info.config & + FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (info->bandwidth_max < 1 || info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. Set to 100\n", + info->bandwidth_max); + info->bandwidth_max = 100; + } + } + + if (shmem_info.mac_upper || shmem_info.mac_lower) { + info->mac[0] = (u8)(shmem_info.mac_upper >> 8); + info->mac[1] = (u8)(shmem_info.mac_upper); + info->mac[2] = (u8)(shmem_info.mac_lower >> 24); + info->mac[3] = (u8)(shmem_info.mac_lower >> 16); + info->mac[4] = (u8)(shmem_info.mac_lower >> 8); + info->mac[5] = (u8)(shmem_info.mac_lower); + } else { + DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n"); + } + + info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper | + (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32); + info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper | + (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32); + + info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); + + DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), + "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n", + info->pause_on_host, info->protocol, + info->bandwidth_min, info->bandwidth_max, + info->mac[0], info->mac[1], info->mac[2], + info->mac[3], info->mac[4], info->mac[5], + info->wwn_port, info->wwn_node, info->ovlan); + + return 0; +} + +struct qed_mcp_link_params +*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return NULL; + return &p_hwfn->mcp_info->link_input; +} + +struct qed_mcp_link_state +*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return NULL; + return &p_hwfn->mcp_info->link_output; +} + +struct qed_mcp_link_capabilities +*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return NULL; + return &p_hwfn->mcp_info->link_capabilities; +} + +int qed_mcp_drain(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NIG_DRAIN, 100, + &resp, ¶m); + + /* Wait for the drain to complete before returning */ + msleep(120); + + return rc; +} + +int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_flash_size) +{ + u32 flash_size; + + flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); + flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> + MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; + flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT)); + + *p_flash_size = flash_size; + + return 0; +} + +int +qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_drv_version *p_ver) +{ + int rc = 0; + u32 param = 0, reply = 0, i; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + return -EBUSY; + } + + DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version, + p_ver->version); + /* Copy version string to shmem */ + for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) { + DRV_MB_WR(p_hwfn, p_ptt, + union_data.drv_version.name[i * sizeof(u32)], + *(u32 *)&p_ver->name[i * sizeof(u32)]); + } + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply, + ¶m); + if (rc) { + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } + + return 0; +} diff --git 
a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h new file mode 100644 index 000000000000..dbaae586b4a7 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -0,0 +1,369 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_MCP_H +#define _QED_MCP_H + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include "qed_hsi.h" + +struct qed_mcp_link_speed_params { + bool autoneg; + u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */ + u32 forced_speed; /* In Mb/s */ +}; + +struct qed_mcp_link_pause_params { + bool autoneg; + bool forced_rx; + bool forced_tx; +}; + +struct qed_mcp_link_params { + struct qed_mcp_link_speed_params speed; + struct qed_mcp_link_pause_params pause; + u32 loopback_mode; +}; + +struct qed_mcp_link_capabilities { + u32 speed_capabilities; +}; + +struct qed_mcp_link_state { + bool link_up; + + u32 speed; /* In Mb/s */ + bool full_duplex; + + bool an; + bool an_complete; + bool parallel_detection; + bool pfc_enabled; + +#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0) +#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1) +#define QED_LINK_PARTNER_SPEED_10G BIT(2) +#define QED_LINK_PARTNER_SPEED_20G BIT(3) +#define QED_LINK_PARTNER_SPEED_40G BIT(4) +#define QED_LINK_PARTNER_SPEED_50G BIT(5) +#define QED_LINK_PARTNER_SPEED_100G BIT(6) + u32 partner_adv_speed; + + bool partner_tx_flow_ctrl_en; + bool partner_rx_flow_ctrl_en; + +#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1) +#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2) +#define QED_LINK_PARTNER_BOTH_PAUSE (3) + u8 partner_adv_pause; + + bool sfp_tx_fault; +}; + +struct qed_mcp_function_info { + u8 pause_on_host; + + enum qed_pci_personality protocol; + + u8 bandwidth_min; + u8 bandwidth_max; + + u8 mac[ETH_ALEN]; + + u64 wwn_port; + u64 wwn_node; + +#define QED_MCP_VLAN_UNSET (0xffff) + u16 ovlan; +}; + +struct qed_mcp_nvm_common { + u32 offset; + u32 param; + u32 resp; + u32 cmd; +}; + +struct qed_mcp_drv_version { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +/** + * @brief - returns the link params of the hw function + * + * @param p_hwfn + * + * @returns pointer to link params + */ +struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *); + +/** + * @brief - return the link state of the hw function + * + * @param p_hwfn + * + * @returns pointer to link state + */ +struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *); + +/** + * @brief - return the link capabilities of the hw function + * + * @param p_hwfn + * + * @returns pointer to link capabilities + */ +struct qed_mcp_link_capabilities + *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn); + +/** + * @brief Request the MFW to set the the link according to 'link_input'. + * + * @param p_hwfn + * @param p_ptt + * @param b_up - raise link if `true'. Reset link if `false'. + * + * @return int + */ +int qed_mcp_set_link(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_up); + +/** + * @brief Get the management firmware version value + * + * @param cdev - qed dev pointer + * @param mfw_ver - mfw version value + * + * @return int - 0 - operation was successul. + */ +int qed_mcp_get_mfw_ver(struct qed_dev *cdev, + u32 *mfw_ver); + +/** + * @brief Get media type value of the port. 
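A stand-alone sketch of how a consumer of struct qed_mcp_link_state might interpret the partner_adv_speed and partner_adv_pause fields declared above. The bit positions and pause values are copied from this header; the sample values and the rest of the program are made up. The asymmetric-pause test at the end is the same one qed_fill_link() in qed_main.c uses when building lp_caps.

#include <stdio.h>
#include <stdint.h>

/* Copied from the qed_mcp_link_state definitions above. */
#define QED_LINK_PARTNER_SPEED_10G		(1 << 2)
#define QED_LINK_PARTNER_SPEED_40G		(1 << 4)
#define QED_LINK_PARTNER_SYMMETRIC_PAUSE	(1)
#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE	(2)
#define QED_LINK_PARTNER_BOTH_PAUSE		(3)

int main(void)
{
	uint32_t partner_adv_speed = QED_LINK_PARTNER_SPEED_10G |
				     QED_LINK_PARTNER_SPEED_40G;	/* sample */
	uint8_t partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;	/* sample */

	if (partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		printf("link partner advertises 40G\n");

	if (partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		printf("link partner supports asymmetric pause\n");

	return 0;
}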
+ * + * @param cdev - qed dev pointer + * @param mfw_ver - media type value + * + * @return int - + * 0 - Operation was successul. + * -EBUSY - Operation failed + */ +int qed_mcp_get_media_type(struct qed_dev *cdev, + u32 *media_type); + +/** + * @brief General function for sending commands to the MCP + * mailbox. It acquire mutex lock for the entire + * operation, from sending the request until the MCP + * response. Waiting for MCP response will be checked up + * to 5 seconds every 5ms. + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param cmd - command to be sent to the MCP. + * @param param - Optional param + * @param o_mcp_resp - The MCP response code (exclude sequence). + * @param o_mcp_param- Optional parameter provided by the MCP + * response + * @return int - 0 - operation + * was successul. + */ +int qed_mcp_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param); + +/** + * @brief - drains the nig, allowing completion to pass in case of pauses. + * (Should be called only from sleepable context) + * + * @param p_hwfn + * @param p_ptt + */ +int qed_mcp_drain(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief Get the flash size value + * + * @param p_hwfn + * @param p_ptt + * @param p_flash_size - flash size in bytes to be filled. + * + * @return int - 0 - operation was successul. + */ +int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_flash_size); + +/** + * @brief Send driver version to MFW + * + * @param p_hwfn + * @param p_ptt + * @param version - Version value + * @param name - Protocol driver name + * + * @return int - 0 - operation was successul. + */ +int +qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_drv_version *p_ver); + +/* Using hwfn number (and not pf_num) is required since in CMT mode, + * same pf_num may be used by two different hwfn + * TODO - this shouldn't really be in .h file, but until all fields + * required during hw-init will be placed in their correct place in shmem + * we need it in qed_dev.c [for readin the nvram reflection in shmem]. + */ +#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \ + ((rel_pfid) | \ + ((p_hwfn)->abs_pf_id & 1) << 3) : \ + rel_pfid) +#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) + +/* TODO - this is only correct as long as only BB is supported, and + * no port-swapping is implemented; Afterwards we'll need to fix it. 
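A worked example of the MCP_PF_ID_BY_REL() arithmetic defined just above. The real macro takes a struct qed_hwfn and QED_IS_BB(); here those are replaced by plain variables with arbitrary sample values so the snippet compiles on its own:

#include <stdio.h>

int main(void)
{
	int is_bb = 1;		/* stands in for QED_IS_BB(cdev)             */
	int rel_pfid = 2;	/* PF id relative to the hw-function         */
	int abs_pf_id = 3;	/* absolute PF id; odd means the second hwfn */

	int mcp_pf_id = is_bb ? (rel_pfid | ((abs_pf_id & 1) << 3)) : rel_pfid;

	printf("MCP PF id = %d\n", mcp_pf_id);	/* 2 | (1 << 3) = 10 */
	return 0;
}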
+ */ +#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ + ((_p_hwfn)->cdev->num_ports_in_engines * 2)) +struct qed_mcp_info { + struct mutex mutex; /* MCP access lock */ + u32 public_base; + u32 drv_mb_addr; + u32 mfw_mb_addr; + u32 port_addr; + u16 drv_mb_seq; + u16 drv_pulse_seq; + struct qed_mcp_link_params link_input; + struct qed_mcp_link_state link_output; + struct qed_mcp_link_capabilities link_capabilities; + struct qed_mcp_function_info func_info; + u8 *mfw_mb_cur; + u8 *mfw_mb_shadow; + u16 mfw_mb_length; + u16 mcp_hist; +}; + +/** + * @brief Initialize the interface with the MCP + * + * @param p_hwfn - HW func + * @param p_ptt - PTT required for register access + * + * @return int + */ +int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief Initialize the port interface with the MCP + * + * @param p_hwfn + * @param p_ptt + * Can only be called after `num_ports_in_engines' is set + */ +void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); +/** + * @brief Releases resources allocated during the init process. + * + * @param p_hwfn - HW func + * @param p_ptt - PTT required for register access + * + * @return int + */ + +int qed_mcp_free(struct qed_hwfn *p_hwfn); + +/** + * @brief This function is called from the DPC context. After + * pointing PTT to the mfw mb, check for events sent by the MCP + * to the driver and ack them. In case a critical event + * detected, it will be handled here, otherwise the work will be + * queued to a sleepable work-queue. + * + * @param p_hwfn - HW function + * @param p_ptt - PTT required for register access + * @return int - 0 - operation + * was successul. + */ +int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief Sends a LOAD_REQ to the MFW, and in case operation + * succeed, returns whether this PF is the first on the + * chip/engine/port or function. This function should be + * called when driver is ready to accept MFW events after + * Storms initializations are done. + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param p_load_code - The MCP response param containing one + * of the following: + * FW_MSG_CODE_DRV_LOAD_ENGINE + * FW_MSG_CODE_DRV_LOAD_PORT + * FW_MSG_CODE_DRV_LOAD_FUNCTION + * @return int - + * 0 - Operation was successul. + * -EBUSY - Operation failed + */ +int qed_mcp_load_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_load_code); + +/** + * @brief Read the MFW mailbox into Current buffer. + * + * @param p_hwfn + * @param p_ptt + */ +void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief - calls during init to read shmem of all function-related info. + * + * @param p_hwfn + * + * @param return 0 upon success. + */ +int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief - Reset the MCP using mailbox command. + * + * @param p_hwfn + * @param p_ptt + * + * @param return 0 upon success. 
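To tie the prototypes in this header together, here is one plausible bring-up sequence written as a hypothetical helper. It is not part of the patch, it only compiles inside the driver, and the ordering is inferred solely from the constraints stated in the comments above (port_init needs num_ports_in_engines, load_req is sent once the driver can accept MFW events); in the real driver these calls are spread across the probe and hw-init paths rather than made back to back.

/* Hypothetical illustration only, not part of this patch. */
static int example_mcp_bring_up(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 load_code;
	int rc;

	rc = qed_mcp_cmd_init(p_hwfn, p_ptt);	/* map mailboxes, allocate mcp_info */
	if (rc)
		return rc;

	rc = qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);	/* MAC, BW, ovlan */
	if (rc)
		return rc;

	/* Assumes num_ports_in_engines has been discovered by this point. */
	qed_mcp_cmd_port_init(p_hwfn, p_ptt);

	/* Ask the MFW whether we are first on the engine/port/function. */
	rc = qed_mcp_load_req(p_hwfn, p_ptt, &load_code);
	if (rc)
		return rc;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "load_code 0x%08x\n", load_code);
	return 0;
}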
+ */ +int qed_mcp_reset(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief indicates whether the MFW objects [under mcp_info] are accessible + * + * @param p_hwfn + * + * @return true iff MFW is running and mcp_info is initialized + */ +bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h new file mode 100644 index 000000000000..7a5ce5914ace --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -0,0 +1,366 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef REG_ADDR_H +#define REG_ADDR_H + +#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \ + 0 + +#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \ + 0xfff << 0) + +#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \ + 12 + +#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \ + 0xfff << 12) + +#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \ + 24 + +#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \ + 0xff << 24) + +#define XSDM_REG_OPERATION_GEN \ + 0xf80408UL +#define NIG_REG_RX_BRB_OUT_EN \ + 0x500e18UL +#define NIG_REG_STORM_OUT_EN \ + 0x500e08UL +#define PSWRQ2_REG_L2P_VALIDATE_VFID \ + 0x240c50UL +#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \ + 0x2aae04UL +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \ + 0x2aa16cUL +#define BAR0_MAP_REG_MSDM_RAM \ + 0x1d00000UL +#define BAR0_MAP_REG_USDM_RAM \ + 0x1d80000UL +#define BAR0_MAP_REG_PSDM_RAM \ + 0x1f00000UL +#define BAR0_MAP_REG_TSDM_RAM \ + 0x1c80000UL +#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ + 0x5011f4UL +#define PRS_REG_SEARCH_TCP \ + 0x1f0400UL +#define PRS_REG_SEARCH_UDP \ + 0x1f0404UL +#define PRS_REG_SEARCH_FCOE \ + 0x1f0408UL +#define PRS_REG_SEARCH_ROCE \ + 0x1f040cUL +#define PRS_REG_SEARCH_OPENFLOW \ + 0x1f0434UL +#define TM_REG_PF_ENABLE_CONN \ + 0x2c043cUL +#define TM_REG_PF_ENABLE_TASK \ + 0x2c0444UL +#define TM_REG_PF_SCAN_ACTIVE_CONN \ + 0x2c04fcUL +#define TM_REG_PF_SCAN_ACTIVE_TASK \ + 0x2c0500UL +#define IGU_REG_LEADING_EDGE_LATCH \ + 0x18082cUL +#define IGU_REG_TRAILING_EDGE_LATCH \ + 0x180830UL +#define QM_REG_USG_CNT_PF_TX \ + 0x2f2eacUL +#define QM_REG_USG_CNT_PF_OTHER \ + 0x2f2eb0UL +#define DORQ_REG_PF_DB_ENABLE \ + 0x100508UL +#define QM_REG_PF_EN \ + 0x2f2ea4UL +#define TCFC_REG_STRONG_ENABLE_PF \ + 0x2d0708UL +#define CCFC_REG_STRONG_ENABLE_PF \ + 0x2e0708UL +#define PGLUE_B_REG_PGL_ADDR_88_F0 \ + 0x2aa404UL +#define PGLUE_B_REG_PGL_ADDR_8C_F0 \ + 0x2aa408UL +#define PGLUE_B_REG_PGL_ADDR_90_F0 \ + 0x2aa40cUL +#define PGLUE_B_REG_PGL_ADDR_94_F0 \ + 0x2aa410UL +#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \ + 0x2aa138UL +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \ + 0x2aa174UL +#define MISC_REG_GEN_PURP_CR0 \ + 0x008c80UL +#define MCP_REG_SCRATCH \ + 0xe20000UL +#define CNIG_REG_NW_PORT_MODE_BB_B0 \ + 0x218200UL +#define MISCS_REG_CHIP_NUM \ + 0x00976cUL +#define MISCS_REG_CHIP_REV \ + 0x009770UL +#define MISCS_REG_CMT_ENABLED_FOR_PAIR \ + 0x00971cUL +#define MISCS_REG_CHIP_TEST_REG \ + 0x009778UL +#define MISCS_REG_CHIP_METAL \ + 0x009774UL +#define BRB_REG_HEADER_SIZE \ + 0x340804UL +#define BTB_REG_HEADER_SIZE \ + 0xdb0804UL +#define CAU_REG_LONG_TIMEOUT_THRESHOLD \ + 0x1c0708UL +#define CCFC_REG_ACTIVITY_COUNTER \ + 0x2e8800UL +#define CDU_REG_CID_ADDR_PARAMS \ + 0x580900UL +#define DBG_REG_CLIENT_ENABLE 
\ + 0x010004UL +#define DMAE_REG_INIT \ + 0x00c000UL +#define DORQ_REG_IFEN \ + 0x100040UL +#define GRC_REG_TIMEOUT_EN \ + 0x050404UL +#define IGU_REG_BLOCK_CONFIGURATION \ + 0x180040UL +#define MCM_REG_INIT \ + 0x1200000UL +#define MCP2_REG_DBG_DWORD_ENABLE \ + 0x052404UL +#define MISC_REG_PORT_MODE \ + 0x008c00UL +#define MISCS_REG_CLK_100G_MODE \ + 0x009070UL +#define MSDM_REG_ENABLE_IN1 \ + 0xfc0004UL +#define MSEM_REG_ENABLE_IN \ + 0x1800004UL +#define NIG_REG_CM_HDR \ + 0x500840UL +#define NCSI_REG_CONFIG \ + 0x040200UL +#define PBF_REG_INIT \ + 0xd80000UL +#define PTU_REG_ATC_INIT_ARRAY \ + 0x560000UL +#define PCM_REG_INIT \ + 0x1100000UL +#define PGLUE_B_REG_ADMIN_PER_PF_REGION \ + 0x2a9000UL +#define PRM_REG_DISABLE_PRM \ + 0x230000UL +#define PRS_REG_SOFT_RST \ + 0x1f0000UL +#define PSDM_REG_ENABLE_IN1 \ + 0xfa0004UL +#define PSEM_REG_ENABLE_IN \ + 0x1600004UL +#define PSWRQ_REG_DBG_SELECT \ + 0x280020UL +#define PSWRQ2_REG_CDUT_P_SIZE \ + 0x24000cUL +#define PSWHST_REG_DISCARD_INTERNAL_WRITES \ + 0x2a0040UL +#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \ + 0x29e050UL +#define PSWRD_REG_DBG_SELECT \ + 0x29c040UL +#define PSWRD2_REG_CONF11 \ + 0x29d064UL +#define PSWWR_REG_USDM_FULL_TH \ + 0x29a040UL +#define PSWWR2_REG_CDU_FULL_TH2 \ + 0x29b040UL +#define QM_REG_MAXPQSIZE_0 \ + 0x2f0434UL +#define RSS_REG_RSS_INIT_EN \ + 0x238804UL +#define RDIF_REG_STOP_ON_ERROR \ + 0x300040UL +#define SRC_REG_SOFT_RST \ + 0x23874cUL +#define TCFC_REG_ACTIVITY_COUNTER \ + 0x2d8800UL +#define TCM_REG_INIT \ + 0x1180000UL +#define TM_REG_PXP_READ_DATA_FIFO_INIT \ + 0x2c0014UL +#define TSDM_REG_ENABLE_IN1 \ + 0xfb0004UL +#define TSEM_REG_ENABLE_IN \ + 0x1700004UL +#define TDIF_REG_STOP_ON_ERROR \ + 0x310040UL +#define UCM_REG_INIT \ + 0x1280000UL +#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \ + 0x051004UL +#define USDM_REG_ENABLE_IN1 \ + 0xfd0004UL +#define USEM_REG_ENABLE_IN \ + 0x1900004UL +#define XCM_REG_INIT \ + 0x1000000UL +#define XSDM_REG_ENABLE_IN1 \ + 0xf80004UL +#define XSEM_REG_ENABLE_IN \ + 0x1400004UL +#define YCM_REG_INIT \ + 0x1080000UL +#define YSDM_REG_ENABLE_IN1 \ + 0xf90004UL +#define YSEM_REG_ENABLE_IN \ + 0x1500004UL +#define XYLD_REG_SCBD_STRICT_PRIO \ + 0x4c0000UL +#define TMLD_REG_SCBD_STRICT_PRIO \ + 0x4d0000UL +#define MULD_REG_SCBD_STRICT_PRIO \ + 0x4e0000UL +#define YULD_REG_SCBD_STRICT_PRIO \ + 0x4c8000UL +#define MISC_REG_SHARED_MEM_ADDR \ + 0x008c20UL +#define DMAE_REG_GO_C0 \ + 0x00c048UL +#define DMAE_REG_GO_C1 \ + 0x00c04cUL +#define DMAE_REG_GO_C2 \ + 0x00c050UL +#define DMAE_REG_GO_C3 \ + 0x00c054UL +#define DMAE_REG_GO_C4 \ + 0x00c058UL +#define DMAE_REG_GO_C5 \ + 0x00c05cUL +#define DMAE_REG_GO_C6 \ + 0x00c060UL +#define DMAE_REG_GO_C7 \ + 0x00c064UL +#define DMAE_REG_GO_C8 \ + 0x00c068UL +#define DMAE_REG_GO_C9 \ + 0x00c06cUL +#define DMAE_REG_GO_C10 \ + 0x00c070UL +#define DMAE_REG_GO_C11 \ + 0x00c074UL +#define DMAE_REG_GO_C12 \ + 0x00c078UL +#define DMAE_REG_GO_C13 \ + 0x00c07cUL +#define DMAE_REG_GO_C14 \ + 0x00c080UL +#define DMAE_REG_GO_C15 \ + 0x00c084UL +#define DMAE_REG_GO_C16 \ + 0x00c088UL +#define DMAE_REG_GO_C17 \ + 0x00c08cUL +#define DMAE_REG_GO_C18 \ + 0x00c090UL +#define DMAE_REG_GO_C19 \ + 0x00c094UL +#define DMAE_REG_GO_C20 \ + 0x00c098UL +#define DMAE_REG_GO_C21 \ + 0x00c09cUL +#define DMAE_REG_GO_C22 \ + 0x00c0a0UL +#define DMAE_REG_GO_C23 \ + 0x00c0a4UL +#define DMAE_REG_GO_C24 \ + 0x00c0a8UL +#define DMAE_REG_GO_C25 \ + 0x00c0acUL +#define DMAE_REG_GO_C26 \ + 0x00c0b0UL +#define DMAE_REG_GO_C27 \ + 0x00c0b4UL +#define DMAE_REG_GO_C28 \ + 
0x00c0b8UL +#define DMAE_REG_GO_C29 \ + 0x00c0bcUL +#define DMAE_REG_GO_C30 \ + 0x00c0c0UL +#define DMAE_REG_GO_C31 \ + 0x00c0c4UL +#define DMAE_REG_CMD_MEM \ + 0x00c800UL +#define QM_REG_MAXPQSIZETXSEL_0 \ + 0x2f0440UL +#define QM_REG_SDMCMDREADY \ + 0x2f1e10UL +#define QM_REG_SDMCMDADDR \ + 0x2f1e04UL +#define QM_REG_SDMCMDDATALSB \ + 0x2f1e08UL +#define QM_REG_SDMCMDDATAMSB \ + 0x2f1e0cUL +#define QM_REG_SDMCMDGO \ + 0x2f1e14UL +#define QM_REG_RLPFCRD \ + 0x2f4d80UL +#define QM_REG_RLPFINCVAL \ + 0x2f4c80UL +#define QM_REG_RLGLBLCRD \ + 0x2f4400UL +#define QM_REG_RLGLBLINCVAL \ + 0x2f3400UL +#define IGU_REG_ATTENTION_ENABLE \ + 0x18083cUL +#define IGU_REG_ATTN_MSG_ADDR_L \ + 0x180820UL +#define IGU_REG_ATTN_MSG_ADDR_H \ + 0x180824UL +#define MISC_REG_AEU_GENERAL_ATTN_0 \ + 0x008400UL +#define CAU_REG_SB_ADDR_MEMORY \ + 0x1c8000UL +#define CAU_REG_SB_VAR_MEMORY \ + 0x1c6000UL +#define CAU_REG_PI_MEMORY \ + 0x1d0000UL +#define IGU_REG_PF_CONFIGURATION \ + 0x180800UL +#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ + 0x00849cUL +#define MISC_REG_AEU_MASK_ATTN_IGU \ + 0x008494UL +#define IGU_REG_CLEANUP_STATUS_0 \ + 0x180980UL +#define IGU_REG_CLEANUP_STATUS_1 \ + 0x180a00UL +#define IGU_REG_CLEANUP_STATUS_2 \ + 0x180a80UL +#define IGU_REG_CLEANUP_STATUS_3 \ + 0x180b00UL +#define IGU_REG_CLEANUP_STATUS_4 \ + 0x180b80UL +#define IGU_REG_COMMAND_REG_32LSB_DATA \ + 0x180840UL +#define IGU_REG_COMMAND_REG_CTRL \ + 0x180848UL +#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \ + 0x1 << 1) +#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \ + 0x1 << 0) +#define IGU_REG_MAPPING_MEMORY \ + 0x184000UL +#define MISCS_REG_GENERIC_POR_0 \ + 0x0096d4UL +#define MCP_REG_NVM_CFG4 \ + 0xe0642cUL +#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \ + 0x7 << 0) +#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ + 0 +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h new file mode 100644 index 000000000000..31a1f1eb4f56 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -0,0 +1,360 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_SP_H +#define _QED_SP_H + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" + +enum spq_mode { + QED_SPQ_MODE_BLOCK, /* Client will poll a designated mem. 
address */ + QED_SPQ_MODE_CB, /* Client supplies a callback */ + QED_SPQ_MODE_EBLOCK, /* QED should block until completion */ +}; + +struct qed_spq_comp_cb { + void (*function)(struct qed_hwfn *, + void *, + union event_ring_data *, + u8 fw_return_code); + void *cookie; +}; + +/** + * @brief qed_eth_cqe_completion - handles the completion of a + * ramrod on the cqe ring + * + * @param p_hwfn + * @param cqe + * + * @return int + */ +int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe); + +/** + * @file + * + * QED Slow-hwfn queue interface + */ + +union ramrod_data { + struct pf_start_ramrod_data pf_start; + struct rx_queue_start_ramrod_data rx_queue_start; + struct rx_queue_update_ramrod_data rx_queue_update; + struct rx_queue_stop_ramrod_data rx_queue_stop; + struct tx_queue_start_ramrod_data tx_queue_start; + struct tx_queue_stop_ramrod_data tx_queue_stop; + struct vport_start_ramrod_data vport_start; + struct vport_stop_ramrod_data vport_stop; + struct vport_update_ramrod_data vport_update; + struct vport_filter_update_ramrod_data vport_filter_update; +}; + +#define EQ_MAX_CREDIT 0xffffffff + +enum spq_priority { + QED_SPQ_PRIORITY_NORMAL, + QED_SPQ_PRIORITY_HIGH, +}; + +union qed_spq_req_comp { + struct qed_spq_comp_cb cb; + u64 *done_addr; +}; + +struct qed_spq_comp_done { + u64 done; + u8 fw_return_code; +}; + +struct qed_spq_entry { + struct list_head list; + + u8 flags; + + /* HSI slow path element */ + struct slow_path_element elem; + + union ramrod_data ramrod; + + enum spq_priority priority; + + /* pending queue for this entry */ + struct list_head *queue; + + enum spq_mode comp_mode; + struct qed_spq_comp_cb comp_cb; + struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ +}; + +struct qed_eq { + struct qed_chain chain; + u8 eq_sb_index; /* index within the SB */ + __le16 *p_fw_cons; /* ptr to index value */ +}; + +struct qed_consq { + struct qed_chain chain; +}; + +struct qed_spq { + spinlock_t lock; /* SPQ lock */ + + struct list_head unlimited_pending; + struct list_head pending; + struct list_head completion_pending; + struct list_head free_pool; + + struct qed_chain chain; + + /* allocated dma-able memory for spq entries (+ramrod data) */ + dma_addr_t p_phys; + struct qed_spq_entry *p_virt; + + /* Used as index for completions (returns on EQ by FW) */ + u16 echo_idx; + + /* Statistics */ + u32 unlimited_pending_count; + u32 normal_count; + u32 high_count; + u32 comp_sent_count; + u32 comp_count; + + u32 cid; +}; + +/** + * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that + * Pends it to the future list. + * + * @param p_hwfn + * @param p_req + * + * @return int + */ +int qed_spq_post(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *fw_return_code); + +/** + * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ. + * + * @param p_hwfn + * + * @return int + */ +int qed_spq_alloc(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_spq_setup - Reset the SPQ to its start state. + * + * @param p_hwfn + */ +void qed_spq_setup(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_spq_deallocate - Deallocates the given SPQ struct. + * + * @param p_hwfn + */ +void qed_spq_free(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_spq_get_entry - Obtain an entrry from the spq + * free pool list. 
+ * + * + * + * @param p_hwfn + * @param pp_ent + * + * @return int + */ +int +qed_spq_get_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry **pp_ent); + +/** + * @brief qed_spq_return_entry - Return an entry to spq free + * pool list + * + * @param p_hwfn + * @param p_ent + */ +void qed_spq_return_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent); +/** + * @brief qed_eq_allocate - Allocates & initializes an EQ struct + * + * @param p_hwfn + * @param num_elem number of elements in the eq + * + * @return struct qed_eq* - a newly allocated structure; NULL upon error. + */ +struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, + u16 num_elem); + +/** + * @brief qed_eq_setup - Reset the SPQ to its start state. + * + * @param p_hwfn + * @param p_eq + */ +void qed_eq_setup(struct qed_hwfn *p_hwfn, + struct qed_eq *p_eq); + +/** + * @brief qed_eq_deallocate - deallocates the given EQ struct. + * + * @param p_hwfn + * @param p_eq + */ +void qed_eq_free(struct qed_hwfn *p_hwfn, + struct qed_eq *p_eq); + +/** + * @brief qed_eq_prod_update - update the FW with default EQ producer + * + * @param p_hwfn + * @param prod + */ +void qed_eq_prod_update(struct qed_hwfn *p_hwfn, + u16 prod); + +/** + * @brief qed_eq_completion - Completes currently pending EQ elements + * + * @param p_hwfn + * @param cookie + * + * @return int + */ +int qed_eq_completion(struct qed_hwfn *p_hwfn, + void *cookie); + +/** + * @brief qed_spq_completion - Completes a single event + * + * @param p_hwfn + * @param echo - echo value from cookie (used for determining completion) + * @param p_data - data from cookie (used in callback function if applicable) + * + * @return int + */ +int qed_spq_completion(struct qed_hwfn *p_hwfn, + __le16 echo, + u8 fw_return_code, + union event_ring_data *p_data); + +/** + * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ + * + * @param p_hwfn + * + * @return u32 - SPQ CID + */ +u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_consq_alloc - Allocates & initializes an ConsQ + * struct + * + * @param p_hwfn + * + * @return struct qed_eq* - a newly allocated structure; NULL upon error. + */ +struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_consq_setup - Reset the ConsQ to its start + * state. + * + * @param p_hwfn + * @param p_eq + */ +void qed_consq_setup(struct qed_hwfn *p_hwfn, + struct qed_consq *p_consq); + +/** + * @brief qed_consq_free - deallocates the given ConsQ struct. + * + * @param p_hwfn + * @param p_eq + */ +void qed_consq_free(struct qed_hwfn *p_hwfn, + struct qed_consq *p_consq); + +/** + * @file + * + * @brief Slow-hwfn low-level commands (Ramrods) function definitions. + */ + +#define QED_SP_EQ_COMPLETION 0x01 +#define QED_SP_CQE_COMPLETION 0x02 + +struct qed_sp_init_request_params { + size_t ramrod_data_size; + enum spq_mode comp_mode; + struct qed_spq_comp_cb *p_comp_data; +}; + +int qed_sp_init_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry **pp_ent, + u32 cid, + u16 opaque_fid, + u8 cmd, + u8 protocol, + struct qed_sp_init_request_params *p_params); + +/** + * @brief qed_sp_pf_start - PF Function Start Ramrod + * + * This ramrod is sent to initialize a physical function (PF). It will + * configure the function related parameters and write its completion to the + * event ring specified in the parameters. + * + * Ramrods complete on the common event ring for the PF. 
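As a usage illustration of the declarations above, a hypothetical caller that posts a ramrod in QED_SPQ_MODE_CB with its own completion callback. The helper and callback names are invented, the command, protocol and ramrod size are left as parameters so nothing outside this header is assumed, and the code only compiles inside the driver. The PF-start ramrod implemented in qed_sp_commands.c below follows the same init-request/post pattern, but with QED_SPQ_MODE_EBLOCK.

/* Hypothetical illustration only, not part of this patch. */
static void example_ramrod_done(struct qed_hwfn *p_hwfn, void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	/* Runs from the EQ completion path once the firmware answers. */
	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "ramrod completed, fw code %d\n",
		   fw_return_code);
}

static int example_post_ramrod(struct qed_hwfn *p_hwfn, u32 cid, u16 opaque_fid,
			       u8 cmd, u8 protocol, size_t ramrod_size)
{
	struct qed_sp_init_request_params params;
	struct qed_spq_comp_cb cb;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	cb.function = example_ramrod_done;
	cb.cookie = NULL;

	memset(&params, 0, sizeof(params));
	params.ramrod_data_size = ramrod_size;	/* zeroed by qed_sp_init_request() */
	params.comp_mode = QED_SPQ_MODE_CB;
	params.p_comp_data = &cb;

	rc = qed_sp_init_request(p_hwfn, &p_ent, cid, opaque_fid,
				 cmd, protocol, &params);
	if (rc)
		return rc;

	/* The caller would fill the relevant member of p_ent->ramrod here. */
	return qed_spq_post(p_hwfn, p_ent, NULL);
}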
This ring is + * allocated by the driver on host memory and its parameters are written + * to the internal RAM of the UStorm by the Function Start Ramrod. + * + * @param p_hwfn + * @param mode + * + * @return int + */ + +int qed_sp_pf_start(struct qed_hwfn *p_hwfn, + enum mf_mode mode); + +/** + * @brief qed_sp_pf_stop - PF Function Stop Ramrod + * + * This ramrod is sent to close a Physical Function (PF). It is the last ramrod + * sent and the last completion written to the PFs Event Ring. This ramrod also + * deletes the context for the Slowhwfn connection on this PF. + * + * @note Not required for first packet. + * + * @param p_hwfn + * + * @return int + */ + +int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c new file mode 100644 index 000000000000..6f7879136633 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -0,0 +1,170 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/bitops.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include "qed.h" +#include <linux/qed/qed_chain.h> +#include "qed_cxt.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" + +int qed_sp_init_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry **pp_ent, + u32 cid, + u16 opaque_fid, + u8 cmd, + u8 protocol, + struct qed_sp_init_request_params *p_params) +{ + int rc = -EINVAL; + struct qed_spq_entry *p_ent = NULL; + u32 opaque_cid = opaque_fid << 16 | cid; + + if (!pp_ent) + return -ENOMEM; + + rc = qed_spq_get_entry(p_hwfn, pp_ent); + + if (rc != 0) + return rc; + + p_ent = *pp_ent; + + p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid); + p_ent->elem.hdr.cmd_id = cmd; + p_ent->elem.hdr.protocol_id = protocol; + + p_ent->priority = QED_SPQ_PRIORITY_NORMAL; + p_ent->comp_mode = p_params->comp_mode; + p_ent->comp_done.done = 0; + + switch (p_ent->comp_mode) { + case QED_SPQ_MODE_EBLOCK: + p_ent->comp_cb.cookie = &p_ent->comp_done; + break; + + case QED_SPQ_MODE_BLOCK: + if (!p_params->p_comp_data) + return -EINVAL; + + p_ent->comp_cb.cookie = p_params->p_comp_data->cookie; + break; + + case QED_SPQ_MODE_CB: + if (!p_params->p_comp_data) + p_ent->comp_cb.function = NULL; + else + p_ent->comp_cb = *p_params->p_comp_data; + break; + + default: + DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", + p_ent->comp_mode); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n", + opaque_cid, cmd, protocol, + (unsigned long)&p_ent->ramrod, + D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, + QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", + "MODE_CB")); + if (p_params->ramrod_data_size) + memset(&p_ent->ramrod, 0, p_params->ramrod_data_size); + + return 0; +} + +int qed_sp_pf_start(struct qed_hwfn *p_hwfn, + enum mf_mode mode) +{ + struct qed_sp_init_request_params params; + struct pf_start_ramrod_data *p_ramrod = NULL; + u16 sb = qed_int_get_sp_sb_id(p_hwfn); + u8 sb_index = p_hwfn->p_eq->eq_sb_index; + struct qed_spq_entry *p_ent = NULL; + int rc = -EINVAL; + + /* update initial eq producer */ + qed_eq_prod_update(p_hwfn, + 
qed_chain_get_prod_idx(&p_hwfn->p_eq->chain)); + + memset(&params, 0, sizeof(params)); + params.ramrod_data_size = sizeof(*p_ramrod); + params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, + &p_ent, + qed_spq_get_cid(p_hwfn), + p_hwfn->hw_info.opaque_fid, + COMMON_RAMROD_PF_START, + PROTOCOLID_COMMON, + &params); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.pf_start; + + p_ramrod->event_ring_sb_id = cpu_to_le16(sb); + p_ramrod->event_ring_sb_index = sb_index; + p_ramrod->path_id = QED_PATH_ID(p_hwfn); + p_ramrod->dont_log_ramrods = 0; + p_ramrod->log_type_mask = cpu_to_le16(0xf); + p_ramrod->mf_mode = mode; + p_ramrod->outer_tag = p_hwfn->hw_info.ovlan; + + /* Place EQ address in RAMROD */ + p_ramrod->event_ring_pbl_addr.hi = + DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table); + p_ramrod->event_ring_pbl_addr.lo = + DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table); + p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt; + + p_ramrod->consolid_q_pbl_addr.hi = + DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table); + p_ramrod->consolid_q_pbl_addr.lo = + DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table); + + p_hwfn->hw_info.personality = PERSONALITY_ETH; + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n", + sb, sb_index, + (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf", + p_ramrod->outer_tag); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) +{ + struct qed_sp_init_request_params params; + struct qed_spq_entry *p_ent = NULL; + int rc = -EINVAL; + + memset(&params, 0, sizeof(params)); + params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn), + p_hwfn->hw_info.opaque_fid, + COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON, + &params); + if (rc) + return rc; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c new file mode 100644 index 000000000000..7c0b8459666e --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -0,0 +1,860 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" + +/*************************************************************************** +* Structures & Definitions +***************************************************************************/ + +#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) +#define SPQ_BLOCK_SLEEP_LENGTH (1000) + +/*************************************************************************** +* Blocking Imp.
(BLOCK/EBLOCK mode) +***************************************************************************/ +static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, + void *cookie, + union event_ring_data *data, + u8 fw_return_code) +{ + struct qed_spq_comp_done *comp_done; + + comp_done = (struct qed_spq_comp_done *)cookie; + + comp_done->done = 0x1; + comp_done->fw_return_code = fw_return_code; + + /* make update visible to waiting thread */ + smp_wmb(); +} + +static int qed_spq_block(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *p_fw_ret) +{ + int sleep_count = SPQ_BLOCK_SLEEP_LENGTH; + struct qed_spq_comp_done *comp_done; + int rc; + + comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; + while (sleep_count) { + /* validate we receive completion update */ + smp_rmb(); + if (comp_done->done == 1) { + if (p_fw_ret) + *p_fw_ret = comp_done->fw_return_code; + return 0; + } + usleep_range(5000, 10000); + sleep_count--; + } + + DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); + rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt); + if (rc != 0) + DP_NOTICE(p_hwfn, "MCP drain failed\n"); + + /* Retry after drain */ + sleep_count = SPQ_BLOCK_SLEEP_LENGTH; + while (sleep_count) { + /* validate we receive completion update */ + smp_rmb(); + if (comp_done->done == 1) { + if (p_fw_ret) + *p_fw_ret = comp_done->fw_return_code; + return 0; + } + usleep_range(5000, 10000); + sleep_count--; + } + + if (comp_done->done == 1) { + if (p_fw_ret) + *p_fw_ret = comp_done->fw_return_code; + return 0; + } + + DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n"); + + return -EBUSY; +} + +/*************************************************************************** +* SPQ entries inner API +***************************************************************************/ +static int +qed_spq_fill_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) +{ + p_ent->elem.hdr.echo = 0; + p_hwfn->p_spq->echo_idx++; + p_ent->flags = 0; + + switch (p_ent->comp_mode) { + case QED_SPQ_MODE_EBLOCK: + case QED_SPQ_MODE_BLOCK: + p_ent->comp_cb.function = qed_spq_blocking_cb; + break; + case QED_SPQ_MODE_CB: + break; + default: + DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", + p_ent->comp_mode); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n", + p_ent->elem.hdr.cid, + p_ent->elem.hdr.cmd_id, + p_ent->elem.hdr.protocol_id, + p_ent->elem.data_ptr.hi, + p_ent->elem.data_ptr.lo, + D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, + QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", + "MODE_CB")); + + return 0; +} + +/*************************************************************************** +* HSI access +***************************************************************************/ +static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, + struct qed_spq *p_spq) +{ + u16 pq; + struct qed_cxt_info cxt_info; + struct core_conn_context *p_cxt; + union qed_qm_pq_params pq_params; + int rc; + + cxt_info.iid = p_spq->cid; + + rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); + + if (rc < 0) { + DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n", + p_spq->cid); + return; + } + + p_cxt = cxt_info.p_cxt; + + SET_FIELD(p_cxt->xstorm_ag_context.flags10, + XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); + SET_FIELD(p_cxt->xstorm_ag_context.flags1, + XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); + SET_FIELD(p_cxt->xstorm_ag_context.flags9, + XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); + + /* QM 
physical queue */ + memset(&pq_params, 0, sizeof(pq_params)); + pq_params.core.tc = LB_TC; + pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); + p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq); + + p_cxt->xstorm_st_context.spq_base_lo = + DMA_LO_LE(p_spq->chain.p_phys_addr); + p_cxt->xstorm_st_context.spq_base_hi = + DMA_HI_LE(p_spq->chain.p_phys_addr); + + p_cxt->xstorm_st_context.consolid_base_addr.lo = + DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr); + p_cxt->xstorm_st_context.consolid_base_addr.hi = + DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr); +} + +static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, + struct qed_spq *p_spq, + struct qed_spq_entry *p_ent) +{ + struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + struct slow_path_element *elem; + struct core_db_data db; + + elem = qed_chain_produce(p_chain); + if (!elem) { + DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); + return -EINVAL; + } + + *elem = p_ent->elem; /* struct assignment */ + + /* send a doorbell on the slow hwfn session */ + memset(&db, 0, sizeof(db)); + SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); + SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_SPQ_PROD_CMD); + db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + + /* validate producer is up to-date */ + rmb(); + + db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain)); + + /* do not reorder */ + barrier(); + + DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db); + + /* make sure doorbell is rang */ + mmiowb(); + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n", + qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), + p_spq->cid, db.params, db.agg_flags, + qed_chain_get_prod_idx(p_chain)); + + return 0; +} + +/*************************************************************************** +* Asynchronous events +***************************************************************************/ +static int +qed_async_event_completion(struct qed_hwfn *p_hwfn, + struct event_ring_entry *p_eqe) +{ + DP_NOTICE(p_hwfn, + "Unknown Async completion for protocol: %d\n", + p_eqe->protocol_id); + return -EINVAL; +} + +/*************************************************************************** +* EQ API +***************************************************************************/ +void qed_eq_prod_update(struct qed_hwfn *p_hwfn, + u16 prod) +{ + u32 addr = GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id); + + REG_WR16(p_hwfn, addr, prod); + + /* keep prod updates ordered */ + mmiowb(); +} + +int qed_eq_completion(struct qed_hwfn *p_hwfn, + void *cookie) + +{ + struct qed_eq *p_eq = cookie; + struct qed_chain *p_chain = &p_eq->chain; + int rc = 0; + + /* take a snapshot of the FW consumer */ + u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons); + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx); + + /* Need to guarantee the fw_cons index we use points to a usuable + * element (to comply with our chain), so our macros would comply + */ + if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) == + qed_chain_get_usable_per_page(p_chain)) + fw_cons_idx += qed_chain_get_unusable_per_page(p_chain); + + /* Complete current segment of eq entries */ + while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) { + struct event_ring_entry *p_eqe = qed_chain_consume(p_chain); + + if (!p_eqe) { + rc = -EINVAL; + break; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "op %x prot %x res0 %x echo 
%x fwret %x flags %x\n", + p_eqe->opcode, + p_eqe->protocol_id, + p_eqe->reserved0, + le16_to_cpu(p_eqe->echo), + p_eqe->fw_return_code, + p_eqe->flags); + + if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) { + if (qed_async_event_completion(p_hwfn, p_eqe)) + rc = -EINVAL; + } else if (qed_spq_completion(p_hwfn, + p_eqe->echo, + p_eqe->fw_return_code, + &p_eqe->data)) { + rc = -EINVAL; + } + + qed_chain_recycle_consumed(p_chain); + } + + qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); + + return rc; +} + +struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, + u16 num_elem) +{ + struct qed_eq *p_eq; + + /* Allocate EQ struct */ + p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC); + if (!p_eq) { + DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n"); + return NULL; + } + + /* Allocate and initialize EQ chain*/ + if (qed_chain_alloc(p_hwfn->cdev, + QED_CHAIN_USE_TO_PRODUCE, + QED_CHAIN_MODE_PBL, + num_elem, + sizeof(union event_ring_element), + &p_eq->chain)) { + DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n"); + goto eq_allocate_fail; + } + + /* register EQ completion on the SP SB */ + qed_int_register_cb(p_hwfn, + qed_eq_completion, + p_eq, + &p_eq->eq_sb_index, + &p_eq->p_fw_cons); + + return p_eq; + +eq_allocate_fail: + qed_eq_free(p_hwfn, p_eq); + return NULL; +} + +void qed_eq_setup(struct qed_hwfn *p_hwfn, + struct qed_eq *p_eq) +{ + qed_chain_reset(&p_eq->chain); +} + +void qed_eq_free(struct qed_hwfn *p_hwfn, + struct qed_eq *p_eq) +{ + if (!p_eq) + return; + qed_chain_free(p_hwfn->cdev, &p_eq->chain); + kfree(p_eq); +} + +/*************************************************************************** +* CQE API - manipulate EQ functionality +***************************************************************************/ +static int qed_cqe_completion( + struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe, + enum protocol_type protocol) +{ + /* @@@tmp - it's possible we'll eventually want to handle some + * actual commands that can arrive here, but for now this is only + * used to complete the ramrod using the echo value on the cqe + */ + return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL); +} + +int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe) +{ + int rc; + + rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to handle RXQ CQE [cmd 0x%02x]\n", + cqe->ramrod_cmd_id); + + return rc; +} + +/*************************************************************************** +* Slow hwfn Queue (spq) +***************************************************************************/ +void qed_spq_setup(struct qed_hwfn *p_hwfn) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + struct qed_spq_entry *p_virt = NULL; + dma_addr_t p_phys = 0; + unsigned int i = 0; + + INIT_LIST_HEAD(&p_spq->pending); + INIT_LIST_HEAD(&p_spq->completion_pending); + INIT_LIST_HEAD(&p_spq->free_pool); + INIT_LIST_HEAD(&p_spq->unlimited_pending); + spin_lock_init(&p_spq->lock); + + /* SPQ empty pool */ + p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod); + p_virt = p_spq->p_virt; + + for (i = 0; i < p_spq->chain.capacity; i++) { + p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys); + p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys); + + list_add_tail(&p_virt->list, &p_spq->free_pool); + + p_virt++; + p_phys += sizeof(struct qed_spq_entry); + } + + /* Statistics */ + p_spq->normal_count = 0; + p_spq->comp_count = 0; + p_spq->comp_sent_count = 0; + p_spq->unlimited_pending_count = 0; + p_spq->echo_idx = 0; + + /* SPQ cid, cannot 
fail */ + qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); + qed_spq_hw_initialize(p_hwfn, p_spq); + + /* reset the chain itself */ + qed_chain_reset(&p_spq->chain); +} + +int qed_spq_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_spq *p_spq = NULL; + dma_addr_t p_phys = 0; + struct qed_spq_entry *p_virt = NULL; + + /* SPQ struct */ + p_spq = + kzalloc(sizeof(struct qed_spq), GFP_ATOMIC); + if (!p_spq) { + DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n"); + return -ENOMEM; + } + + /* SPQ ring */ + if (qed_chain_alloc(p_hwfn->cdev, + QED_CHAIN_USE_TO_PRODUCE, + QED_CHAIN_MODE_SINGLE, + 0, /* N/A when the mode is SINGLE */ + sizeof(struct slow_path_element), + &p_spq->chain)) { + DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n"); + goto spq_allocate_fail; + } + + /* allocate and fill the SPQ elements (incl. ramrod data list) */ + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_spq->chain.capacity * + sizeof(struct qed_spq_entry), + &p_phys, + GFP_KERNEL); + + if (!p_virt) + goto spq_allocate_fail; + + p_spq->p_virt = p_virt; + p_spq->p_phys = p_phys; + p_hwfn->p_spq = p_spq; + + return 0; + +spq_allocate_fail: + qed_chain_free(p_hwfn->cdev, &p_spq->chain); + kfree(p_spq); + return -ENOMEM; +} + +void qed_spq_free(struct qed_hwfn *p_hwfn) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + + if (!p_spq) + return; + + if (p_spq->p_virt) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_spq->chain.capacity * + sizeof(struct qed_spq_entry), + p_spq->p_virt, + p_spq->p_phys); + + qed_chain_free(p_hwfn->cdev, &p_spq->chain); + ; + kfree(p_spq); +} + +int +qed_spq_get_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry **pp_ent) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + struct qed_spq_entry *p_ent = NULL; + int rc = 0; + + spin_lock_bh(&p_spq->lock); + + if (list_empty(&p_spq->free_pool)) { + p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC); + if (!p_ent) { + rc = -ENOMEM; + goto out_unlock; + } + p_ent->queue = &p_spq->unlimited_pending; + } else { + p_ent = list_first_entry(&p_spq->free_pool, + struct qed_spq_entry, + list); + list_del(&p_ent->list); + p_ent->queue = &p_spq->pending; + } + + *pp_ent = p_ent; + +out_unlock: + spin_unlock_bh(&p_spq->lock); + return rc; +} + +/* Locked variant; Should be called while the SPQ lock is taken */ +static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) +{ + list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool); +} + +void qed_spq_return_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) +{ + spin_lock_bh(&p_hwfn->p_spq->lock); + __qed_spq_return_entry(p_hwfn, p_ent); + spin_unlock_bh(&p_hwfn->p_spq->lock); +} + +/** + * @brief qed_spq_add_entry - adds a new entry to the pending + * list. Should be used while lock is being held. + * + * Addes an entry to the pending list is there is room (en empty + * element is available in the free_pool), or else places the + * entry in the unlimited_pending pool. 
+ * + * @param p_hwfn + * @param p_ent + * @param priority + * + * @return int + */ +static int +qed_spq_add_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + enum spq_priority priority) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + + if (p_ent->queue == &p_spq->unlimited_pending) { + struct qed_spq_entry *p_en2; + + if (list_empty(&p_spq->free_pool)) { + list_add_tail(&p_ent->list, &p_spq->unlimited_pending); + p_spq->unlimited_pending_count++; + + return 0; + } + + p_en2 = list_first_entry(&p_spq->free_pool, + struct qed_spq_entry, + list); + list_del(&p_en2->list); + + /* Strcut assignment */ + *p_en2 = *p_ent; + + kfree(p_ent); + + p_ent = p_en2; + } + + /* entry is to be placed in 'pending' queue */ + switch (priority) { + case QED_SPQ_PRIORITY_NORMAL: + list_add_tail(&p_ent->list, &p_spq->pending); + p_spq->normal_count++; + break; + case QED_SPQ_PRIORITY_HIGH: + list_add(&p_ent->list, &p_spq->pending); + p_spq->high_count++; + break; + default: + return -EINVAL; + } + + return 0; +} + +/*************************************************************************** +* Accessor +***************************************************************************/ +u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn->p_spq) + return 0xffffffff; /* illegal */ + return p_hwfn->p_spq->cid; +} + +/*************************************************************************** +* Posting new Ramrods +***************************************************************************/ +static int qed_spq_post_list(struct qed_hwfn *p_hwfn, + struct list_head *head, + u32 keep_reserve) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + int rc; + + while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve && + !list_empty(head)) { + struct qed_spq_entry *p_ent = + list_first_entry(head, struct qed_spq_entry, list); + list_del(&p_ent->list); + list_add_tail(&p_ent->list, &p_spq->completion_pending); + p_spq->comp_sent_count++; + + rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent); + if (rc) { + list_del(&p_ent->list); + __qed_spq_return_entry(p_hwfn, p_ent); + return rc; + } + } + + return 0; +} + +static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + struct qed_spq_entry *p_ent = NULL; + + while (!list_empty(&p_spq->free_pool)) { + if (list_empty(&p_spq->unlimited_pending)) + break; + + p_ent = list_first_entry(&p_spq->unlimited_pending, + struct qed_spq_entry, + list); + if (!p_ent) + return -EINVAL; + + list_del(&p_ent->list); + + qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); + } + + return qed_spq_post_list(p_hwfn, &p_spq->pending, + SPQ_HIGH_PRI_RESERVE_DEFAULT); +} + +int qed_spq_post(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *fw_return_code) +{ + int rc = 0; + struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL; + bool b_ret_ent = true; + + if (!p_hwfn) + return -EINVAL; + + if (!p_ent) { + DP_NOTICE(p_hwfn, "Got a NULL pointer\n"); + return -EINVAL; + } + + /* Complete the entry */ + rc = qed_spq_fill_entry(p_hwfn, p_ent); + + spin_lock_bh(&p_spq->lock); + + /* Check return value after LOCK is taken for cleaner error flow */ + if (rc) + goto spq_post_fail; + + /* Add the request to the pending queue */ + rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); + if (rc) + goto spq_post_fail; + + rc = qed_spq_pend_post(p_hwfn); + if (rc) { + /* Since it's possible that pending failed for a different + * entry [although unlikely], the failed entry was already + * dealt with; No need to return it here. 
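The posting helpers above keep two software queues in front of the hardware ring: entries that got a buffer from the free pool wait on "pending", overflow requests park on "unlimited_pending" until a pool entry is returned, and qed_spq_pend_post() only pushes pending entries into the chain while more than SPQ_HIGH_PRI_RESERVE_DEFAULT elements remain free. A minimal standalone model of that admission and reserve policy (plain C with toy counters, not the driver's structures) is:

/* Minimal model of the SPQ admission policy: a bounded free pool feeds
 * "pending", overflow goes to "unlimited_pending", and posting into the
 * ring always leaves a small reserve for high-priority work.
 * Simplified stand-in types; completion/refill of the pool is not modeled.
 */
#include <stdio.h>

#define RING_SIZE   8
#define RESERVE     1   /* mirrors SPQ_HIGH_PRI_RESERVE_DEFAULT */
#define POOL_SIZE   10

struct model {
	int ring_used;          /* elements currently occupied in the ring */
	int free_pool;          /* entries left in the software free pool */
	int pending;            /* entries waiting with a pool-backed buffer */
	int unlimited_pending;  /* entries queued without a pool entry */
};

static void add_entry(struct model *m)
{
	if (m->free_pool) {     /* pool available: goes straight to pending */
		m->free_pool--;
		m->pending++;
	} else {                /* pool exhausted: park in the overflow list */
		m->unlimited_pending++;
	}
}

static void pend_post(struct model *m)
{
	/* move overflow entries into pending as pool entries free up */
	while (m->free_pool && m->unlimited_pending) {
		m->free_pool--;
		m->unlimited_pending--;
		m->pending++;
	}
	/* post while the ring keeps more than RESERVE free elements */
	while (m->pending && (RING_SIZE - m->ring_used) > RESERVE) {
		m->pending--;
		m->ring_used++;
	}
}

int main(void)
{
	struct model m = { .free_pool = POOL_SIZE };
	int i;

	for (i = 0; i < 12; i++)
		add_entry(&m);
	pend_post(&m);
	printf("ring %d/%d pending %d overflow %d pool %d\n",
	       m.ring_used, RING_SIZE, m.pending, m.unlimited_pending,
	       m.free_pool);
	return 0;
}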
+ */ + b_ret_ent = false; + goto spq_post_fail; + } + + spin_unlock_bh(&p_spq->lock); + + if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) { + /* For entries in QED BLOCK mode, the completion code cannot + * perform the necessary cleanup - if it did, we couldn't + * access p_ent here to see whether it's successful or not. + * Thus, after gaining the answer perform the cleanup here. + */ + rc = qed_spq_block(p_hwfn, p_ent, fw_return_code); + if (rc) + goto spq_post_fail2; + + /* return to pool */ + qed_spq_return_entry(p_hwfn, p_ent); + } + return rc; + +spq_post_fail2: + spin_lock_bh(&p_spq->lock); + list_del(&p_ent->list); + qed_chain_return_produced(&p_spq->chain); + +spq_post_fail: + /* return to the free pool */ + if (b_ret_ent) + __qed_spq_return_entry(p_hwfn, p_ent); + spin_unlock_bh(&p_spq->lock); + + return rc; +} + +int qed_spq_completion(struct qed_hwfn *p_hwfn, + __le16 echo, + u8 fw_return_code, + union event_ring_data *p_data) +{ + struct qed_spq *p_spq; + struct qed_spq_entry *p_ent = NULL; + struct qed_spq_entry *tmp; + struct qed_spq_entry *found = NULL; + int rc; + + if (!p_hwfn) + return -EINVAL; + + p_spq = p_hwfn->p_spq; + if (!p_spq) + return -EINVAL; + + spin_lock_bh(&p_spq->lock); + list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, + list) { + if (p_ent->elem.hdr.echo == echo) { + list_del(&p_ent->list); + + qed_chain_return_produced(&p_spq->chain); + p_spq->comp_count++; + found = p_ent; + break; + } + } + + /* Release lock before callback, as callback may post + * an additional ramrod. + */ + spin_unlock_bh(&p_spq->lock); + + if (!found) { + DP_NOTICE(p_hwfn, + "Failed to find an entry this EQE completes\n"); + return -EEXIST; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n", + p_ent->comp_cb.function, p_ent->comp_cb.cookie); + if (found->comp_cb.function) + found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, + fw_return_code); + + if (found->comp_mode != QED_SPQ_MODE_EBLOCK) + /* EBLOCK is responsible for freeing its own entry */ + qed_spq_return_entry(p_hwfn, found); + + /* Attempt to post pending requests */ + spin_lock_bh(&p_spq->lock); + rc = qed_spq_pend_post(p_hwfn); + spin_unlock_bh(&p_spq->lock); + + return rc; +} + +struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_consq *p_consq; + + /* Allocate ConsQ struct */ + p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC); + if (!p_consq) { + DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n"); + return NULL; + } + + /* Allocate and initialize EQ chain*/ + if (qed_chain_alloc(p_hwfn->cdev, + QED_CHAIN_USE_TO_PRODUCE, + QED_CHAIN_MODE_PBL, + QED_CHAIN_PAGE_SIZE / 0x80, + 0x80, + &p_consq->chain)) { + DP_NOTICE(p_hwfn, "Failed to allocate consq chain"); + goto consq_allocate_fail; + } + + return p_consq; + +consq_allocate_fail: + qed_consq_free(p_hwfn, p_consq); + return NULL; +} + +void qed_consq_setup(struct qed_hwfn *p_hwfn, + struct qed_consq *p_consq) +{ + qed_chain_reset(&p_consq->chain); +} + +void qed_consq_free(struct qed_hwfn *p_hwfn, + struct qed_consq *p_consq) +{ + if (!p_consq) + return; + qed_chain_free(p_hwfn->cdev, &p_consq->chain); + kfree(p_consq); +} diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile new file mode 100644 index 000000000000..06ff90d87572 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_QEDE) := qede.o + +qede-y := qede_main.o qede_ethtool.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h 
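qed_spq_completion() above finds the ramrod a firmware event belongs to by the echo cookie stamped into the request header: it walks the completion_pending list under the lock, unlinks the match, and runs its callback after dropping the lock. A compact userspace illustration of that lookup-by-cookie pattern (hand-rolled singly linked list, hypothetical helper names, not the kernel list API) could look like:

/* Toy model of completion matching by "echo": each posted request carries
 * a 16-bit cookie; the completion handler finds the request with the same
 * cookie, unlinks it from the in-flight list and runs its callback.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	uint16_t echo;
	void (*done)(struct request *req, uint8_t fw_rc);
	struct request *next;
};

static struct request *in_flight;

static void post_request(uint16_t echo,
			 void (*done)(struct request *, uint8_t))
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		abort();
	req->echo = echo;
	req->done = done;
	req->next = in_flight;     /* push onto the in-flight list */
	in_flight = req;
}

static int complete_by_echo(uint16_t echo, uint8_t fw_rc)
{
	struct request **pp;

	for (pp = &in_flight; *pp; pp = &(*pp)->next) {
		if ((*pp)->echo == echo) {
			struct request *req = *pp;

			*pp = req->next;           /* unlink first ... */
			req->done(req, fw_rc);     /* ... then run callback */
			free(req);
			return 0;
		}
	}
	return -1;                                 /* no matching request */
}

static void on_done(struct request *req, uint8_t fw_rc)
{
	printf("echo %u completed, fw_rc %u\n", req->echo, fw_rc);
}

int main(void)
{
	post_request(1, on_done);
	post_request(2, on_done);
	if (complete_by_echo(2, 0))
		printf("unexpected: echo 2 not found\n");
	if (complete_by_echo(7, 0))
		printf("echo 7 not found (as expected)\n");
	return 0;
}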
b/drivers/net/ethernet/qlogic/qede/qede.h new file mode 100644 index 000000000000..ea00d5f3bab4 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -0,0 +1,285 @@ +/* QLogic qede NIC Driver +* Copyright (c) 2015 QLogic Corporation +* +* This software is available under the terms of the GNU General Public License +* (GPL) Version 2, available from the file COPYING in the main directory of +* this source tree. +*/ + +#ifndef _QEDE_H_ +#define _QEDE_H_ +#include <linux/compiler.h> +#include <linux/version.h> +#include <linux/workqueue.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/bitmap.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/io.h> +#include <linux/qed/common_hsi.h> +#include <linux/qed/eth_common.h> +#include <linux/qed/qed_if.h> +#include <linux/qed/qed_chain.h> +#include <linux/qed/qed_eth_if.h> + +#define QEDE_MAJOR_VERSION 8 +#define QEDE_MINOR_VERSION 4 +#define QEDE_REVISION_VERSION 0 +#define QEDE_ENGINEERING_VERSION 0 +#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ + __stringify(QEDE_MINOR_VERSION) "." \ + __stringify(QEDE_REVISION_VERSION) "." \ + __stringify(QEDE_ENGINEERING_VERSION) + +#define QEDE_ETH_INTERFACE_VERSION 300 + +#define DRV_MODULE_SYM qede + +struct qede_stats { + u64 no_buff_discards; + u64 rx_ucast_bytes; + u64 rx_mcast_bytes; + u64 rx_bcast_bytes; + u64 rx_ucast_pkts; + u64 rx_mcast_pkts; + u64 rx_bcast_pkts; + u64 mftag_filter_discards; + u64 mac_filter_discards; + u64 tx_ucast_bytes; + u64 tx_mcast_bytes; + u64 tx_bcast_bytes; + u64 tx_ucast_pkts; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_err_drop_pkts; + u64 coalesced_pkts; + u64 coalesced_events; + u64 coalesced_aborts_num; + u64 non_coalesced_pkts; + u64 coalesced_bytes; + + /* port */ + u64 rx_64_byte_packets; + u64 rx_127_byte_packets; + u64 rx_255_byte_packets; + u64 rx_511_byte_packets; + u64 rx_1023_byte_packets; + u64 rx_1518_byte_packets; + u64 rx_1522_byte_packets; + u64 rx_2047_byte_packets; + u64 rx_4095_byte_packets; + u64 rx_9216_byte_packets; + u64 rx_16383_byte_packets; + u64 rx_crc_errors; + u64 rx_mac_crtl_frames; + u64 rx_pause_frames; + u64 rx_pfc_frames; + u64 rx_align_errors; + u64 rx_carrier_errors; + u64 rx_oversize_packets; + u64 rx_jabbers; + u64 rx_undersize_packets; + u64 rx_fragments; + u64 tx_64_byte_packets; + u64 tx_65_to_127_byte_packets; + u64 tx_128_to_255_byte_packets; + u64 tx_256_to_511_byte_packets; + u64 tx_512_to_1023_byte_packets; + u64 tx_1024_to_1518_byte_packets; + u64 tx_1519_to_2047_byte_packets; + u64 tx_2048_to_4095_byte_packets; + u64 tx_4096_to_9216_byte_packets; + u64 tx_9217_to_16383_byte_packets; + u64 tx_pause_frames; + u64 tx_pfc_frames; + u64 tx_lpi_entry_count; + u64 tx_total_collisions; + u64 brb_truncates; + u64 brb_discards; + u64 tx_mac_ctrl_frames; +}; + +struct qede_dev { + struct qed_dev *cdev; + struct net_device *ndev; + struct pci_dev *pdev; + + u32 dp_module; + u8 dp_level; + + const struct qed_eth_ops *ops; + + struct qed_dev_eth_info dev_info; +#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) +#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \ + (edev)->dev_info.num_tc) + + struct qede_fastpath *fp_array; + u16 num_rss; + u8 num_tc; +#define QEDE_RSS_CNT(edev) ((edev)->num_rss) +#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \ + (edev)->num_tc) +#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss) +#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss) +#define QEDE_TX_QUEUE(edev, txqidx) \ + 
(&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \ + (edev), (txqidx))]) + + struct qed_int_info int_info; + unsigned char primary_mac[ETH_ALEN]; + + /* Smaller private varaiant of the RTNL lock */ + struct mutex qede_lock; + u32 state; /* Protected by qede_lock */ + u16 rx_buf_size; + /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ETH_OVERHEAD (ETH_HLEN + 8 + 8) + /* Max supported alignment is 256 (8 shift) + * minimal alignment shift 6 is optimal for 57xxx HW performance + */ +#define QEDE_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT)) + /* We assume skb_build() uses sizeof(struct skb_shared_info) bytes + * at the end of skb->data, to avoid wasting a full cache line. + * This reduces memory use (skb->truesize). + */ +#define QEDE_FW_RX_ALIGN_END \ + max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT, \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + + struct qede_stats stats; + struct qed_update_vport_rss_params rss_params; + u16 q_num_rx_buffers; /* Must be a power of two */ + u16 q_num_tx_buffers; /* Must be a power of two */ + + struct delayed_work sp_task; + unsigned long sp_flags; +}; + +enum QEDE_STATE { + QEDE_STATE_CLOSED, + QEDE_STATE_OPEN, +}; + +#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) + +#define MAX_NUM_TC 8 +#define MAX_NUM_PRI 8 + +/* The driver supports the new build_skb() API: + * RX ring buffer contains pointer to kmalloc() data only, + * skb are built only after the frame was DMA-ed. + */ +struct sw_rx_data { + u8 *data; + + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +struct qede_rx_queue { + __le16 *hw_cons_ptr; + struct sw_rx_data *sw_rx_ring; + u16 sw_rx_cons; + u16 sw_rx_prod; + struct qed_chain rx_bd_ring; + struct qed_chain rx_comp_ring; + void __iomem *hw_rxq_prod_addr; + + int rx_buf_size; + + u16 num_rx_buffers; + u16 rxq_id; + + u64 rx_hw_errors; + u64 rx_alloc_errors; +}; + +union db_prod { + struct eth_db_data data; + u32 raw; +}; + +struct sw_tx_bd { + struct sk_buff *skb; + u8 flags; +/* Set on the first BD descriptor when there is a split BD */ +#define QEDE_TSO_SPLIT_BD BIT(0) +}; + +struct qede_tx_queue { + int index; /* Queue index */ + __le16 *hw_cons_ptr; + struct sw_tx_bd *sw_tx_ring; + u16 sw_tx_cons; + u16 sw_tx_prod; + struct qed_chain tx_pbl; + void __iomem *doorbell_addr; + union db_prod tx_db; + + u16 num_tx_buffers; +}; + +#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \ + le32_to_cpu((bd)->addr.lo)) +#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \ + do { \ + (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr)); \ + (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr)); \ + (bd)->nbytes = cpu_to_le16(len); \ + } while (0) +#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) + +struct qede_fastpath { + struct qede_dev *edev; + u8 rss_id; + struct napi_struct napi; + struct qed_sb_info *sb_info; + struct qede_rx_queue *rxq; + struct qede_tx_queue *txqs; + +#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) + char name[VEC_NAME_SIZE]; +}; + +/* Debug print definitions */ +#define DP_NAME(edev) ((edev)->ndev->name) + +#define XMIT_PLAIN 0 +#define XMIT_L4_CSUM BIT(0) +#define XMIT_LSO BIT(1) +#define XMIT_ENC BIT(2) + +#define QEDE_CSUM_ERROR BIT(0) +#define QEDE_CSUM_UNNECESSARY BIT(1) + +#define QEDE_SP_RX_MODE 1 + +union qede_reload_args { + u16 mtu; +}; + +void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level); +void qede_set_ethtool_ops(struct net_device *netdev); +void qede_reload(struct qede_dev *edev, + void (*func)(struct qede_dev *edev, + union 
qede_reload_args *args), + union qede_reload_args *args); +int qede_change_mtu(struct net_device *dev, int new_mtu); +void qede_fill_by_demand_stats(struct qede_dev *edev); + +#define RX_RING_SIZE_POW 13 +#define RX_RING_SIZE BIT(RX_RING_SIZE_POW) +#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) +#define NUM_RX_BDS_MIN 128 +#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX + +#define TX_RING_SIZE_POW 13 +#define TX_RING_SIZE BIT(TX_RING_SIZE_POW) +#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1) +#define NUM_TX_BDS_MIN 128 +#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX + +#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++) + +#endif /* _QEDE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c new file mode 100644 index 000000000000..3a362476a22c --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -0,0 +1,385 @@ +/* QLogic qede NIC Driver +* Copyright (c) 2015 QLogic Corporation +* +* This software is available under the terms of the GNU General Public License +* (GPL) Version 2, available from the file COPYING in the main directory of +* this source tree. +*/ + +#include <linux/version.h> +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/string.h> +#include <linux/pci.h> +#include <linux/capability.h> +#include "qede.h" + +#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name)) +#define QEDE_STAT_STRING(stat_name) (#stat_name) +#define _QEDE_STAT(stat_name, pf_only) \ + {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only} +#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true) +#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false) + +#define QEDE_RQSTAT_OFFSET(stat_name) \ + (offsetof(struct qede_rx_queue, stat_name)) +#define QEDE_RQSTAT_STRING(stat_name) (#stat_name) +#define QEDE_RQSTAT(stat_name) \ + {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)} +static const struct { + u64 offset; + char string[ETH_GSTRING_LEN]; +} qede_rqstats_arr[] = { + QEDE_RQSTAT(rx_hw_errors), + QEDE_RQSTAT(rx_alloc_errors), +}; + +#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr) +#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \ + (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\ + qede_rqstats_arr[(sindex)].offset))) +static const struct { + u64 offset; + char string[ETH_GSTRING_LEN]; + bool pf_only; +} qede_stats_arr[] = { + QEDE_STAT(rx_ucast_bytes), + QEDE_STAT(rx_mcast_bytes), + QEDE_STAT(rx_bcast_bytes), + QEDE_STAT(rx_ucast_pkts), + QEDE_STAT(rx_mcast_pkts), + QEDE_STAT(rx_bcast_pkts), + + QEDE_STAT(tx_ucast_bytes), + QEDE_STAT(tx_mcast_bytes), + QEDE_STAT(tx_bcast_bytes), + QEDE_STAT(tx_ucast_pkts), + QEDE_STAT(tx_mcast_pkts), + QEDE_STAT(tx_bcast_pkts), + + QEDE_PF_STAT(rx_64_byte_packets), + QEDE_PF_STAT(rx_127_byte_packets), + QEDE_PF_STAT(rx_255_byte_packets), + QEDE_PF_STAT(rx_511_byte_packets), + QEDE_PF_STAT(rx_1023_byte_packets), + QEDE_PF_STAT(rx_1518_byte_packets), + QEDE_PF_STAT(rx_1522_byte_packets), + QEDE_PF_STAT(rx_2047_byte_packets), + QEDE_PF_STAT(rx_4095_byte_packets), + QEDE_PF_STAT(rx_9216_byte_packets), + QEDE_PF_STAT(rx_16383_byte_packets), + QEDE_PF_STAT(tx_64_byte_packets), + QEDE_PF_STAT(tx_65_to_127_byte_packets), + QEDE_PF_STAT(tx_128_to_255_byte_packets), + QEDE_PF_STAT(tx_256_to_511_byte_packets), + QEDE_PF_STAT(tx_512_to_1023_byte_packets), + QEDE_PF_STAT(tx_1024_to_1518_byte_packets), + QEDE_PF_STAT(tx_1519_to_2047_byte_packets), + QEDE_PF_STAT(tx_2048_to_4095_byte_packets), + 
QEDE_PF_STAT(tx_4096_to_9216_byte_packets), + QEDE_PF_STAT(tx_9217_to_16383_byte_packets), + + QEDE_PF_STAT(rx_mac_crtl_frames), + QEDE_PF_STAT(tx_mac_ctrl_frames), + QEDE_PF_STAT(rx_pause_frames), + QEDE_PF_STAT(tx_pause_frames), + QEDE_PF_STAT(rx_pfc_frames), + QEDE_PF_STAT(tx_pfc_frames), + + QEDE_PF_STAT(rx_crc_errors), + QEDE_PF_STAT(rx_align_errors), + QEDE_PF_STAT(rx_carrier_errors), + QEDE_PF_STAT(rx_oversize_packets), + QEDE_PF_STAT(rx_jabbers), + QEDE_PF_STAT(rx_undersize_packets), + QEDE_PF_STAT(rx_fragments), + QEDE_PF_STAT(tx_lpi_entry_count), + QEDE_PF_STAT(tx_total_collisions), + QEDE_PF_STAT(brb_truncates), + QEDE_PF_STAT(brb_discards), + QEDE_STAT(no_buff_discards), + QEDE_PF_STAT(mftag_filter_discards), + QEDE_PF_STAT(mac_filter_discards), + QEDE_STAT(tx_err_drop_pkts), + + QEDE_STAT(coalesced_pkts), + QEDE_STAT(coalesced_events), + QEDE_STAT(coalesced_aborts_num), + QEDE_STAT(non_coalesced_pkts), + QEDE_STAT(coalesced_bytes), +}; + +#define QEDE_STATS_DATA(dev, index) \ + (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \ + + qede_stats_arr[(index)].offset))) + +#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) + +static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) +{ + int i, j, k; + + for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) { + strcpy(buf + j * ETH_GSTRING_LEN, + qede_stats_arr[i].string); + j++; + } + + for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++) + strcpy(buf + j * ETH_GSTRING_LEN, + qede_rqstats_arr[k].string); +} + +static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + struct qede_dev *edev = netdev_priv(dev); + + switch (stringset) { + case ETH_SS_STATS: + qede_get_strings_stats(edev, buf); + break; + default: + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Unsupported stringset 0x%08x\n", stringset); + } +} + +static void qede_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *buf) +{ + struct qede_dev *edev = netdev_priv(dev); + int sidx, cnt = 0; + int qid; + + qede_fill_by_demand_stats(edev); + + mutex_lock(&edev->qede_lock); + + for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) + buf[cnt++] = QEDE_STATS_DATA(edev, sidx); + + for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) { + buf[cnt] = 0; + for (qid = 0; qid < edev->num_rss; qid++) + buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid); + cnt++; + } + + mutex_unlock(&edev->qede_lock); +} + +static int qede_get_sset_count(struct net_device *dev, int stringset) +{ + struct qede_dev *edev = netdev_priv(dev); + int num_stats = QEDE_NUM_STATS; + + switch (stringset) { + case ETH_SS_STATS: + return num_stats + QEDE_NUM_RQSTATS; + + default: + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Unsupported stringset 0x%08x\n", stringset); + return -EINVAL; + } +} + +static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + + memset(&current_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, &current_link); + + cmd->supported = current_link.supported_caps; + cmd->advertising = current_link.advertised_caps; + if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) { + ethtool_cmd_speed_set(cmd, current_link.speed); + cmd->duplex = current_link.duplex; + } else { + cmd->duplex = DUPLEX_UNKNOWN; + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + } + cmd->port = current_link.port; + cmd->autoneg = (current_link.autoneg) ?
AUTONEG_ENABLE : + AUTONEG_DISABLE; + cmd->lp_advertising = current_link.lp_caps; + + return 0; +} + +static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + struct qed_link_params params; + u32 speed; + + if (edev->dev_info.common.is_mf) { + DP_INFO(edev, + "Link parameters can not be changed in MF mode\n"); + return -EOPNOTSUPP; + } + + memset(&current_link, 0, sizeof(current_link)); + memset(&params, 0, sizeof(params)); + edev->ops->common->get_link(edev->cdev, &current_link); + + speed = ethtool_cmd_speed(cmd); + params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS; + params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG; + if (cmd->autoneg == AUTONEG_ENABLE) { + params.autoneg = true; + params.forced_speed = 0; + params.adv_speeds = cmd->advertising; + } else { /* forced speed */ + params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED; + params.autoneg = false; + params.forced_speed = speed; + switch (speed) { + case SPEED_10000: + if (!(current_link.supported_caps & + SUPPORTED_10000baseKR_Full)) { + DP_INFO(edev, "10G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = SUPPORTED_10000baseKR_Full; + break; + case SPEED_40000: + if (!(current_link.supported_caps & + SUPPORTED_40000baseLR4_Full)) { + DP_INFO(edev, "40G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = SUPPORTED_40000baseLR4_Full; + break; + default: + DP_INFO(edev, "Unsupported speed %u\n", speed); + return -EINVAL; + } + } + + params.link_up = true; + edev->ops->common->set_link(edev->cdev, &params); + + return 0; +} + +static void qede_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN]; + struct qede_dev *edev = netdev_priv(ndev); + + strlcpy(info->driver, "qede", sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + + snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d", + edev->dev_info.common.fw_major, + edev->dev_info.common.fw_minor, + edev->dev_info.common.fw_rev, + edev->dev_info.common.fw_eng); + + snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d", + (edev->dev_info.common.mfw_rev >> 24) & 0xFF, + (edev->dev_info.common.mfw_rev >> 16) & 0xFF, + (edev->dev_info.common.mfw_rev >> 8) & 0xFF, + edev->dev_info.common.mfw_rev & 0xFF); + + if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) < + sizeof(info->fw_version)) { + snprintf(info->fw_version, sizeof(info->fw_version), + "mfw %s storm %s", mfw, storm); + } else { + snprintf(info->fw_version, sizeof(info->fw_version), + "%s %s", mfw, storm); + } + + strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info)); +} + +static u32 qede_get_msglevel(struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + + return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | + edev->dp_module; +} + +static void qede_set_msglevel(struct net_device *ndev, u32 level) +{ + struct qede_dev *edev = netdev_priv(ndev); + u32 dp_module = 0; + u8 dp_level = 0; + + qede_config_debug(level, &dp_module, &dp_level); + + edev->dp_level = dp_level; + edev->dp_module = dp_module; + edev->ops->common->update_msglvl(edev->cdev, + dp_module, dp_level); +} + +static u32 qede_get_link(struct net_device *dev) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + + memset(&current_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, &current_link); + + return
current_link.link_up; +} + +static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args) +{ + edev->ndev->mtu = args->mtu; +} + +/* Netdevice NDOs */ +#define ETH_MAX_JUMBO_PACKET_SIZE 9600 +#define ETH_MIN_PACKET_SIZE 60 +int qede_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct qede_dev *edev = netdev_priv(ndev); + union qede_reload_args args; + + if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || + ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) { + DP_ERR(edev, "Can't support requested MTU size\n"); + return -EINVAL; + } + + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Configuring MTU size of %d\n", new_mtu); + + /* Set the mtu field and re-start the interface if needed*/ + args.mtu = new_mtu; + + if (netif_running(edev->ndev)) + qede_reload(edev, &qede_update_mtu, &args); + + qede_update_mtu(edev, &args); + + return 0; +} + +static const struct ethtool_ops qede_ethtool_ops = { + .get_settings = qede_get_settings, + .set_settings = qede_set_settings, + .get_drvinfo = qede_get_drvinfo, + .get_msglevel = qede_get_msglevel, + .set_msglevel = qede_set_msglevel, + .get_link = qede_get_link, + .get_strings = qede_get_strings, + .get_ethtool_stats = qede_get_ethtool_stats, + .get_sset_count = qede_get_sset_count, + +}; + +void qede_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &qede_ethtool_ops; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c new file mode 100644 index 000000000000..f4657a2e730a --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -0,0 +1,2584 @@ +/* QLogic qede NIC Driver +* Copyright (c) 2015 QLogic Corporation +* +* This software is available under the terms of the GNU General Public License +* (GPL) Version 2, available from the file COPYING in the main directory of +* this source tree. 
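The ethtool statistics code in qede_ethtool.c above is table driven: each qede_stats_arr entry records offsetof(struct qede_stats, field) together with its string, and QEDE_STATS_DATA() dereferences that offset inside the device structure, so the names and values handed to ethtool always come from one array. The same technique in a self-contained form (a made-up stats struct, not the driver's) is:

/* Same offset-table technique as qede_stats_arr/QEDE_STATS_DATA above,
 * applied to a made-up stats structure so it compiles standalone.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t rx_pkts;
	uint64_t tx_pkts;
	uint64_t rx_errors;
};

#define DEMO_STAT(f) { offsetof(struct demo_stats, f), #f }

static const struct {
	size_t offset;
	const char *name;
} demo_stats_arr[] = {
	DEMO_STAT(rx_pkts),
	DEMO_STAT(tx_pkts),
	DEMO_STAT(rx_errors),
};

#define DEMO_NUM_STATS (sizeof(demo_stats_arr) / sizeof(demo_stats_arr[0]))

/* Read one counter through its recorded offset, like QEDE_STATS_DATA(). */
static uint64_t demo_stat(const struct demo_stats *s, size_t i)
{
	return *(const uint64_t *)((const char *)s + demo_stats_arr[i].offset);
}

int main(void)
{
	struct demo_stats s = { .rx_pkts = 10, .tx_pkts = 7, .rx_errors = 1 };
	size_t i;

	for (i = 0; i < DEMO_NUM_STATS; i++)
		printf("%-10s %llu\n", demo_stats_arr[i].name,
		       (unsigned long long)demo_stat(&s, i));
	return 0;
}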
+*/ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/version.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/string.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <asm/byteorder.h> +#include <asm/param.h> +#include <linux/io.h> +#include <linux/netdev_features.h> +#include <linux/udp.h> +#include <linux/tcp.h> +#include <net/vxlan.h> +#include <linux/ip.h> +#include <net/ipv6.h> +#include <net/tcp.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/pkt_sched.h> +#include <linux/ethtool.h> +#include <linux/in.h> +#include <linux/random.h> +#include <net/ip6_checksum.h> +#include <linux/bitops.h> + +#include "qede.h" + +static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede " + DRV_MODULE_VERSION "\n"; + +MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +static uint debug; +module_param(debug, uint, 0); +MODULE_PARM_DESC(debug, " Default debug msglevel"); + +static const struct qed_eth_ops *qed_ops; + +#define CHIP_NUM_57980S_40 0x1634 +#define CHIP_NUM_57980S_10 0x1635 +#define CHIP_NUM_57980S_MF 0x1636 +#define CHIP_NUM_57980S_100 0x1644 +#define CHIP_NUM_57980S_50 0x1654 +#define CHIP_NUM_57980S_25 0x1656 + +#ifndef PCI_DEVICE_ID_NX2_57980E +#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40 +#define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10 +#define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF +#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100 +#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50 +#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25 +#endif + +static const struct pci_device_id qede_pci_tbl[] = { + { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 }, + { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 }, + { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 }, + { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 }, + { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 }, + { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, qede_pci_tbl); + +static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); + +#define TX_TIMEOUT (5 * HZ) + +static void qede_remove(struct pci_dev *pdev); +static int qede_alloc_rx_buffer(struct qede_dev *edev, + struct qede_rx_queue *rxq); +static void qede_link_update(void *dev, struct qed_link_output *link); + +static struct pci_driver qede_pci_driver = { + .name = "qede", + .id_table = qede_pci_tbl, + .probe = qede_probe, + .remove = qede_remove, +}; + +static struct qed_eth_cb_ops qede_ll_ops = { + { + .link_update = qede_link_update, + }, +}; + +static int qede_netdev_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct ethtool_drvinfo drvinfo; + struct qede_dev *edev; + + /* Currently only support name change */ + if (event != NETDEV_CHANGENAME) + goto done; + + /* Check whether this is a qede device */ + if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo) + goto done; + + memset(&drvinfo, 0, sizeof(drvinfo)); + ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo); + if (strcmp(drvinfo.driver, "qede")) + goto done; + edev = netdev_priv(ndev); + + /* Notify qed of the name change */ + if (!edev->ops || !edev->ops->common) + goto done; + edev->ops->common->set_id(edev->cdev, edev->ndev->name, + 
"qede"); + +done: + return NOTIFY_DONE; +} + +static struct notifier_block qede_netdev_notifier = { + .notifier_call = qede_netdev_event, +}; + +static +int __init qede_init(void) +{ + int ret; + u32 qed_ver; + + pr_notice("qede_init: %s\n", version); + + qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH); + if (qed_ver != QEDE_ETH_INTERFACE_VERSION) { + pr_notice("Version mismatch [%08x != %08x]\n", + qed_ver, + QEDE_ETH_INTERFACE_VERSION); + return -EINVAL; + } + + qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION); + if (!qed_ops) { + pr_notice("Failed to get qed ethtool operations\n"); + return -EINVAL; + } + + /* Must register notifier before pci ops, since we might miss + * interface rename after pci probe and netdev registeration. + */ + ret = register_netdevice_notifier(&qede_netdev_notifier); + if (ret) { + pr_notice("Failed to register netdevice_notifier\n"); + qed_put_eth_ops(); + return -EINVAL; + } + + ret = pci_register_driver(&qede_pci_driver); + if (ret) { + pr_notice("Failed to register driver\n"); + unregister_netdevice_notifier(&qede_netdev_notifier); + qed_put_eth_ops(); + return -EINVAL; + } + + return 0; +} + +static void __exit qede_cleanup(void) +{ + pr_notice("qede_cleanup called\n"); + + unregister_netdevice_notifier(&qede_netdev_notifier); + pci_unregister_driver(&qede_pci_driver); + qed_put_eth_ops(); +} + +module_init(qede_init); +module_exit(qede_cleanup); + +/* ------------------------------------------------------------------------- + * START OF FAST-PATH + * ------------------------------------------------------------------------- + */ + +/* Unmap the data and free skb */ +static int qede_free_tx_pkt(struct qede_dev *edev, + struct qede_tx_queue *txq, + int *len) +{ + u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; + struct sk_buff *skb = txq->sw_tx_ring[idx].skb; + struct eth_tx_1st_bd *first_bd; + struct eth_tx_bd *tx_data_bd; + int bds_consumed = 0; + int nbds; + bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD; + int i, split_bd_len = 0; + + if (unlikely(!skb)) { + DP_ERR(edev, + "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", + idx, txq->sw_tx_cons, txq->sw_tx_prod); + return -1; + } + + *len = skb->len; + + first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + + bds_consumed++; + + nbds = first_bd->data.nbds; + + if (data_split) { + struct eth_tx_bd *split = (struct eth_tx_bd *) + qed_chain_consume(&txq->tx_pbl); + split_bd_len = BD_UNMAP_LEN(split); + bds_consumed++; + } + dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + + /* Unmap the data of the skb frags */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_consume(&txq->tx_pbl); + dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + } + + while (bds_consumed++ < nbds) + qed_chain_consume(&txq->tx_pbl); + + /* Free skb */ + dev_kfree_skb_any(skb); + txq->sw_tx_ring[idx].skb = NULL; + txq->sw_tx_ring[idx].flags = 0; + + return 0; +} + +/* Unmap the data and free skb when mapping failed during start_xmit */ +static void qede_free_failed_tx_pkt(struct qede_dev *edev, + struct qede_tx_queue *txq, + struct eth_tx_1st_bd *first_bd, + int nbd, + bool data_split) +{ + u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + struct sk_buff *skb = txq->sw_tx_ring[idx].skb; + struct eth_tx_bd *tx_data_bd; + int i, split_bd_len = 0; + + /* Return prod to its position before this skb 
was handled */ + qed_chain_set_prod(&txq->tx_pbl, + le16_to_cpu(txq->tx_db.data.bd_prod), + first_bd); + + first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); + + if (data_split) { + struct eth_tx_bd *split = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + split_bd_len = BD_UNMAP_LEN(split); + nbd--; + } + + dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + + /* Unmap the data of the skb frags */ + for (i = 0; i < nbd; i++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + if (tx_data_bd->nbytes) + dma_unmap_page(&edev->pdev->dev, + BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + } + + /* Return again prod to its position before this skb was handled */ + qed_chain_set_prod(&txq->tx_pbl, + le16_to_cpu(txq->tx_db.data.bd_prod), + first_bd); + + /* Free skb */ + dev_kfree_skb_any(skb); + txq->sw_tx_ring[idx].skb = NULL; + txq->sw_tx_ring[idx].flags = 0; +} + +static u32 qede_xmit_type(struct qede_dev *edev, + struct sk_buff *skb, + int *ipv6_ext) +{ + u32 rc = XMIT_L4_CSUM; + __be16 l3_proto; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return XMIT_PLAIN; + + l3_proto = vlan_get_protocol(skb); + if (l3_proto == htons(ETH_P_IPV6) && + (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) + *ipv6_ext = 1; + + if (skb_is_gso(skb)) + rc |= XMIT_LSO; + + return rc; +} + +static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, + struct eth_tx_2nd_bd *second_bd, + struct eth_tx_3rd_bd *third_bd) +{ + u8 l4_proto; + u16 bd2_bits = 0, bd2_bits2 = 0; + + bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); + + bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & + ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) + << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; + + bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << + ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); + + if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) + l4_proto = ipv6_hdr(skb)->nexthdr; + else + l4_proto = ip_hdr(skb)->protocol; + + if (l4_proto == IPPROTO_UDP) + bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; + + if (third_bd) { + third_bd->data.bitfields |= + ((tcp_hdrlen(skb) / 4) & + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT; + } + + second_bd->data.bitfields = cpu_to_le16(bd2_bits); + second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); +} + +static int map_frag_to_bd(struct qede_dev *edev, + skb_frag_t *frag, + struct eth_tx_bd *bd) +{ + dma_addr_t mapping; + + /* Map skb non-linear frag data for DMA */ + mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { + DP_NOTICE(edev, "Unable to map frag - dropping packet\n"); + return -ENOMEM; + } + + /* Setup the data pointer of the frag data */ + BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); + + return 0; +} + +/* Main transmit function */ +static +netdev_tx_t qede_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + struct netdev_queue *netdev_txq; + struct qede_tx_queue *txq; + struct eth_tx_1st_bd *first_bd; + struct eth_tx_2nd_bd *second_bd = NULL; + struct eth_tx_3rd_bd *third_bd = NULL; + struct eth_tx_bd *tx_data_bd = NULL; + u16 txq_index; + u8 nbd = 0; + dma_addr_t mapping; + int rc, frag_idx = 0, ipv6_ext = 0; + u8 xmit_type; + u16 idx; + u16 hlen; + bool data_split; + + /* Get tx-queue context and netdev index 
*/ + txq_index = skb_get_queue_mapping(skb); + WARN_ON(txq_index >= QEDE_TSS_CNT(edev)); + txq = QEDE_TX_QUEUE(edev, txq_index); + netdev_txq = netdev_get_tx_queue(ndev, txq_index); + + /* Current code doesn't support SKB linearization, since the max number + * of skb frags can be passed in the FW HSI. + */ + BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET); + + WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < + (MAX_SKB_FRAGS + 1)); + + xmit_type = qede_xmit_type(edev, skb, &ipv6_ext); + + /* Fill the entry in the SW ring and the BDs in the FW ring */ + idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + txq->sw_tx_ring[idx].skb = skb; + first_bd = (struct eth_tx_1st_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(first_bd, 0, sizeof(*first_bd)); + first_bd->data.bd_flags.bitfields = + 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + + /* Map skb linear data for DMA and set in the first BD */ + mapping = dma_map_single(&edev->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { + DP_NOTICE(edev, "SKB mapping failed\n"); + qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false); + return NETDEV_TX_OK; + } + nbd++; + BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); + + /* In case there is IPv6 with extension headers or LSO we need 2nd and + * 3rd BDs. + */ + if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) { + second_bd = (struct eth_tx_2nd_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(second_bd, 0, sizeof(*second_bd)); + + nbd++; + third_bd = (struct eth_tx_3rd_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(third_bd, 0, sizeof(*third_bd)); + + nbd++; + /* We need to fill in additional data in second_bd... */ + tx_data_bd = (struct eth_tx_bd *)second_bd; + } + + if (skb_vlan_tag_present(skb)) { + first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; + } + + /* Fill the parsing flags & params according to the requested offload */ + if (xmit_type & XMIT_L4_CSUM) { + /* We don't re-calculate IP checksum as it is already done by + * the upper stack + */ + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + + /* If the packet is IPv6 with extension header, indicate that + * to FW and pass few params, since the device cracker doesn't + * support parsing IPv6 with extension header/s. + */ + if (unlikely(ipv6_ext)) + qede_set_params_for_ipv6_ext(skb, second_bd, third_bd); + } + + if (xmit_type & XMIT_LSO) { + first_bd->data.bd_flags.bitfields |= + (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); + third_bd->data.lso_mss = + cpu_to_le16(skb_shinfo(skb)->gso_size); + + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + hlen = skb_transport_header(skb) + + tcp_hdrlen(skb) - skb->data; + + /* @@@TBD - if will not be removed need to check */ + third_bd->data.bitfields |= + (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); + + /* Make life easier for FW guys who can't deal with header and + * data on same BD. If we need to split, use the second bd... 
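For LSO the device wants the protocol headers alone on the first buffer descriptor, so when the linear part of the skb is longer than the computed header length hlen, the code just below trims the first BD to hlen and points the next BD at the remaining skb_headlen(skb) - hlen bytes starting at the mapped address plus hlen. The split arithmetic on its own (plain C, the descriptor struct is a stand-in, not the firmware layout):

/* The BD-split arithmetic used for LSO, in isolation: headers go on the
 * first descriptor, the rest of the linear data on a second one.
 */
#include <stdint.h>
#include <stdio.h>

struct bd {
	uint64_t addr;
	uint16_t nbytes;
};

/* Returns the number of descriptors used (1 or 2). */
static int split_linear(uint64_t dma_addr, uint16_t headlen, uint16_t hlen,
			struct bd *first, struct bd *second)
{
	first->addr = dma_addr;
	first->nbytes = headlen;

	if (headlen <= hlen)
		return 1;               /* headers and data already fit */

	first->nbytes = hlen;           /* first BD: headers only */
	second->addr = dma_addr + hlen; /* second BD: remaining linear data */
	second->nbytes = headlen - hlen;
	return 2;
}

int main(void)
{
	struct bd first, second;
	int n = split_linear(0x1000, 200, 66, &first, &second);

	printf("%d BD(s): [0x%llx,%u]", n,
	       (unsigned long long)first.addr, first.nbytes);
	if (n == 2)
		printf(" [0x%llx,%u]",
		       (unsigned long long)second.addr, second.nbytes);
	printf("\n");
	return 0;
}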
+ */ + if (unlikely(skb_headlen(skb) > hlen)) { + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "TSO split header size is %d (%x:%x)\n", + first_bd->nbytes, first_bd->addr.hi, + first_bd->addr.lo); + + mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), + le32_to_cpu(first_bd->addr.lo)) + + hlen; + + BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, + le16_to_cpu(first_bd->nbytes) - + hlen); + + /* this marks the BD as one that has no + * individual mapping + */ + txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD; + + first_bd->nbytes = cpu_to_le16(hlen); + + tx_data_bd = (struct eth_tx_bd *)third_bd; + data_split = true; + } + } + + /* Handle fragmented skb */ + /* special handle for frags inside 2nd and 3rd bds.. */ + while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { + rc = map_frag_to_bd(edev, + &skb_shinfo(skb)->frags[frag_idx], + tx_data_bd); + if (rc) { + qede_free_failed_tx_pkt(edev, txq, first_bd, nbd, + data_split); + return NETDEV_TX_OK; + } + + if (tx_data_bd == (struct eth_tx_bd *)second_bd) + tx_data_bd = (struct eth_tx_bd *)third_bd; + else + tx_data_bd = NULL; + + frag_idx++; + } + + /* map last frags into 4th, 5th .... */ + for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + + memset(tx_data_bd, 0, sizeof(*tx_data_bd)); + + rc = map_frag_to_bd(edev, + &skb_shinfo(skb)->frags[frag_idx], + tx_data_bd); + if (rc) { + qede_free_failed_tx_pkt(edev, txq, first_bd, nbd, + data_split); + return NETDEV_TX_OK; + } + } + + /* update the first BD with the actual num BDs */ + first_bd->data.nbds = nbd; + + netdev_tx_sent_queue(netdev_txq, skb->len); + + skb_tx_timestamp(skb); + + /* Advance packet producer only before sending the packet since mapping + * of pages may fail. + */ + txq->sw_tx_prod++; + + /* 'next page' entries are counted in the producer value */ + txq->tx_db.data.bd_prod = + cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); + + /* wmb makes sure that the BDs data is updated before updating the + * producer, otherwise FW may read old data from the BDs. + */ + wmb(); + barrier(); + writel(txq->tx_db.raw, txq->doorbell_addr); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the queue lock is released and another start_xmit is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. 
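The ordering described above is the usual single-producer publish sequence: write the descriptors, barrier, advance the producer index, then ring the doorbell so the device reads a consistent view. A userspace analogue with C11 atomics (the release fence plays the role of wmb(); mmiowb() has no portable equivalent here, so it is only noted in a comment) might be:

/* Userspace analogue of the publish order used above: fill the descriptor,
 * fence, then advance the producer index that the consumer polls.
 * C11 atomics stand in for the kernel barriers; this is a model only.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16

struct ring {
	uint32_t desc[RING_SIZE];     /* "BDs" */
	_Atomic uint32_t prod;        /* producer index seen by the consumer */
};

static void produce(struct ring *r, uint32_t val)
{
	uint32_t p = atomic_load_explicit(&r->prod, memory_order_relaxed);

	r->desc[p % RING_SIZE] = val;              /* 1. write descriptor data */
	atomic_thread_fence(memory_order_release); /* 2. "wmb": data before index */
	atomic_store_explicit(&r->prod, p + 1,     /* 3. publish new producer */
			      memory_order_relaxed);
	/* 4. in the driver a doorbell write plus mmiowb() would follow here */
}

int main(void)
{
	struct ring r = { .prod = 0 };

	produce(&r, 0xabcd);
	printf("prod=%u desc[0]=0x%x\n",
	       (unsigned)atomic_load(&r.prod), r.desc[0]);
	return 0;
}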
+ */ + mmiowb(); + + if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) + < (MAX_SKB_FRAGS + 1))) { + netif_tx_stop_queue(netdev_txq); + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "Stop queue was called\n"); + /* paired memory barrier is in qede_tx_int(), we have to keep + * ordering of set_bit() in netif_tx_stop_queue() and read of + * fp->bd_tx_cons + */ + smp_mb(); + + if (qed_chain_get_elem_left(&txq->tx_pbl) + >= (MAX_SKB_FRAGS + 1) && + (edev->state == QEDE_STATE_OPEN)) { + netif_tx_wake_queue(netdev_txq); + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "Wake queue was called\n"); + } + } + + return NETDEV_TX_OK; +} + +static int qede_txq_has_work(struct qede_tx_queue *txq) +{ + u16 hw_bd_cons; + + /* Tell compiler that consumer and producer can change */ + barrier(); + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) + return 0; + + return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); +} + +static int qede_tx_int(struct qede_dev *edev, + struct qede_tx_queue *txq) +{ + struct netdev_queue *netdev_txq; + u16 hw_bd_cons; + unsigned int pkts_compl = 0, bytes_compl = 0; + int rc; + + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); + + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + barrier(); + + while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { + int len = 0; + + rc = qede_free_tx_pkt(edev, txq, &len); + if (rc) { + DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n", + hw_bd_cons, + qed_chain_get_cons_idx(&txq->tx_pbl)); + break; + } + + bytes_compl += len; + pkts_compl++; + txq->sw_tx_cons++; + } + + netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); + + /* Need to make the tx_bd_cons update visible to start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that + * start_xmit() will miss it and cause the queue to be stopped + * forever. + * On the other hand we need an rmb() here to ensure the proper + * ordering of bit testing in the following + * netif_tx_queue_stopped(txq) call. + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(netdev_txq))) { + /* Taking tx_lock is needed to prevent reenabling the queue + * while it's empty. 
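The stop/wake logic above is a threshold handshake on ring occupancy: the transmit path stops the queue once fewer than MAX_SKB_FRAGS + 1 elements remain, and the completion path wakes it again only when at least that many are free, with memory barriers on both sides so neither can miss the other's update. A toy model of just the counters and thresholds (free-running 16-bit indices on a power-of-two ring, barriers and locking omitted) is:

/* Model of the almost-full stop / wake thresholds around the TX ring:
 * free-running 16-bit producer/consumer counters, a power-of-two ring,
 * and a queue that stops when fewer than MAX_FRAGS + 1 slots remain.
 * Stand-alone toy; the real code uses qed_chain_* and netif_tx_* helpers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TX_RING_SIZE 8          /* must be a power of two */
#define MAX_FRAGS    3          /* stands in for MAX_SKB_FRAGS */

struct txq {
	uint16_t prod;          /* free-running, wraps naturally */
	uint16_t cons;
	bool stopped;
};

static uint16_t elem_left(const struct txq *q)
{
	return TX_RING_SIZE - (uint16_t)(q->prod - q->cons);
}

static void xmit_one(struct txq *q)
{
	q->prod++;                              /* one BD per packet here */
	if (elem_left(q) < MAX_FRAGS + 1)       /* can't fit a worst case */
		q->stopped = true;              /* -> netif_tx_stop_queue */
}

static void complete_one(struct txq *q)
{
	q->cons++;
	if (q->stopped && elem_left(q) >= MAX_FRAGS + 1)
		q->stopped = false;             /* -> netif_tx_wake_queue */
}

int main(void)
{
	struct txq q = { 0 };
	int i;

	for (i = 0; i < 6; i++)
		xmit_one(&q);
	printf("after xmit: left=%u stopped=%d\n",
	       (unsigned)elem_left(&q), q.stopped);
	for (i = 0; i < 3; i++)
		complete_one(&q);
	printf("after completion: left=%u stopped=%d\n",
	       (unsigned)elem_left(&q), q.stopped);
	return 0;
}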
This could have happen if rx_action() gets + * suspended in qede_tx_int() after the condition before + * netif_tx_wake_queue(), while tx_action (qede_start_xmit()): + * + * stops the queue->sees fresh tx_bd_cons->releases the queue-> + * sends some packets consuming the whole queue again-> + * stops the queue + */ + + __netif_tx_lock(netdev_txq, smp_processor_id()); + + if ((netif_tx_queue_stopped(netdev_txq)) && + (edev->state == QEDE_STATE_OPEN) && + (qed_chain_get_elem_left(&txq->tx_pbl) + >= (MAX_SKB_FRAGS + 1))) { + netif_tx_wake_queue(netdev_txq); + DP_VERBOSE(edev, NETIF_MSG_TX_DONE, + "Wake queue was called\n"); + } + + __netif_tx_unlock(netdev_txq); + } + + return 0; +} + +static bool qede_has_rx_work(struct qede_rx_queue *rxq) +{ + u16 hw_comp_cons, sw_comp_cons; + + /* Tell compiler that status block fields can change */ + barrier(); + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + return hw_comp_cons != sw_comp_cons; +} + +static bool qede_has_tx_work(struct qede_fastpath *fp) +{ + u8 tc; + + for (tc = 0; tc < fp->edev->num_tc; tc++) + if (qede_txq_has_work(&fp->txqs[tc])) + return true; + return false; +} + +/* This function copies the Rx buffer from the CONS position to the PROD + * position, since we failed to allocate a new Rx buffer. + */ +static void qede_reuse_rx_data(struct qede_rx_queue *rxq) +{ + struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); + struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); + struct sw_rx_data *sw_rx_data_cons = + &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; + struct sw_rx_data *sw_rx_data_prod = + &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + + dma_unmap_addr_set(sw_rx_data_prod, mapping, + dma_unmap_addr(sw_rx_data_cons, mapping)); + + sw_rx_data_prod->data = sw_rx_data_cons->data; + memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); + + rxq->sw_rx_cons++; + rxq->sw_rx_prod++; +} + +static inline void qede_update_rx_prod(struct qede_dev *edev, + struct qede_rx_queue *rxq) +{ + u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); + u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); + struct eth_rx_prod_data rx_prods = {0}; + + /* Update producers */ + rx_prods.bd_prod = cpu_to_le16(bd_prod); + rx_prods.cqe_prod = cpu_to_le16(cqe_prod); + + /* Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + */ + wmb(); + + internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), + (u32 *)&rx_prods); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the napi lock is released and another qede_poll is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + mmiowb(); +} + +static u32 qede_get_rxhash(struct qede_dev *edev, + u8 bitfields, + __le32 rss_hash, + enum pkt_hash_types *rxhash_type) +{ + enum rss_hash_type htype; + + htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); + + if ((edev->ndev->features & NETIF_F_RXHASH) && htype) { + *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) || + (htype == RSS_HASH_TYPE_IPV6)) ? 
+ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4; + return le32_to_cpu(rss_hash); + } + *rxhash_type = PKT_HASH_TYPE_NONE; + return 0; +} + +static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) +{ + skb_checksum_none_assert(skb); + + if (csum_flag & QEDE_CSUM_UNNECESSARY) + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +static inline void qede_skb_receive(struct qede_dev *edev, + struct qede_fastpath *fp, + struct sk_buff *skb, + u16 vlan_tag) +{ + if (vlan_tag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_tag); + + napi_gro_receive(&fp->napi, skb); +} + +static u8 qede_check_csum(u16 flag) +{ + u16 csum_flag = 0; + u8 csum = 0; + + if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) { + csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; + csum = QEDE_CSUM_UNNECESSARY; + } + + csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; + + if (csum_flag & flag) + return QEDE_CSUM_ERROR; + + return csum; +} + +static int qede_rx_int(struct qede_fastpath *fp, int budget) +{ + struct qede_dev *edev = fp->edev; + struct qede_rx_queue *rxq = fp->rxq; + + u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag; + int rx_pkt = 0; + u8 csum_flag; + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + /* Memory barrier to prevent the CPU from doing speculative reads of CQE + * / BD in the while-loop before reading hw_comp_cons. If the CQE is + * read before it is written by FW, then FW writes CQE and SB, and then + * the CPU reads the hw_comp_cons, it will use an old CQE. + */ + rmb(); + + /* Loop to complete all indicated BDs */ + while (sw_comp_cons != hw_comp_cons) { + struct eth_fast_path_rx_reg_cqe *fp_cqe; + enum pkt_hash_types rxhash_type; + enum eth_rx_cqe_type cqe_type; + struct sw_rx_data *sw_rx_data; + union eth_rx_cqe *cqe; + struct sk_buff *skb; + u16 len, pad; + u32 rx_hash; + u8 *data; + + /* Get the CQE from the completion ring */ + cqe = (union eth_rx_cqe *) + qed_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; + + if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { + edev->ops->eth_cqe_completion( + edev->cdev, fp->rss_id, + (struct eth_slow_path_rx_cqe *)cqe); + goto next_cqe; + } + + /* Get the data from the SW ring */ + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; + data = sw_rx_data->data; + + fp_cqe = &cqe->fast_path_regular; + len = le16_to_cpu(fp_cqe->pkt_len); + pad = fp_cqe->placement_offset; + + /* For every Rx BD consumed, we allocate a new BD so the BD ring + * is always with a fixed size. If allocation fails, we take the + * consumed BD and return it to the ring in the PROD position. + * The packet that was received on that BD will be dropped (and + * not passed to the upper stack). 
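	 *
	 * Editor's note, an illustrative walk-through of the above (not part of
	 * the original patch comment): if the completed CQE points at SW-ring
	 * buffer A, a successful qede_alloc_rx_buffer() maps a fresh buffer into
	 * the PROD slot and A is unmapped and handed to the stack via
	 * build_skb(); if the allocation fails, qede_reuse_rx_data() copies A's
	 * mapping from the CONS slot back into the PROD slot, so the BD ring
	 * stays fully populated and only the received frame is lost (counted in
	 * rx_alloc_errors).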
+ */ + if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) { + dma_unmap_single(&edev->pdev->dev, + dma_unmap_addr(sw_rx_data, mapping), + rxq->rx_buf_size, DMA_FROM_DEVICE); + + /* If this is an error packet then drop it */ + parse_flag = + le16_to_cpu(cqe->fast_path_regular.pars_flags.flags); + csum_flag = qede_check_csum(parse_flag); + if (csum_flag == QEDE_CSUM_ERROR) { + DP_NOTICE(edev, + "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", + sw_comp_cons, parse_flag); + rxq->rx_hw_errors++; + kfree(data); + goto next_rx; + } + + skb = build_skb(data, 0); + + if (unlikely(!skb)) { + DP_NOTICE(edev, + "Build_skb failed, dropping incoming packet\n"); + kfree(data); + rxq->rx_alloc_errors++; + goto next_rx; + } + + skb_reserve(skb, pad); + + } else { + DP_NOTICE(edev, + "New buffer allocation failed, dropping incoming packet and reusing its buffer\n"); + qede_reuse_rx_data(rxq); + rxq->rx_alloc_errors++; + goto next_cqe; + } + + sw_rx_data->data = NULL; + + skb_put(skb, len); + + skb->protocol = eth_type_trans(skb, edev->ndev); + + rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields, + fp_cqe->rss_hash, + &rxhash_type); + + skb_set_hash(skb, rx_hash, rxhash_type); + + qede_set_skb_csum(skb, csum_flag); + + skb_record_rx_queue(skb, fp->rss_id); + + qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); + + qed_chain_consume(&rxq->rx_bd_ring); + +next_rx: + rxq->sw_rx_cons++; + rx_pkt++; + +next_cqe: /* don't consume bd rx buffer */ + qed_chain_recycle_consumed(&rxq->rx_comp_ring); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + /* CR TPA - revisit how to handle budget in TPA perhaps + * increase on "end" + */ + if (rx_pkt == budget) + break; + } /* repeat while sw_comp_cons != hw_comp_cons... */ + + /* Update producers */ + qede_update_rx_prod(edev, rxq); + + return rx_pkt; +} + +static int qede_poll(struct napi_struct *napi, int budget) +{ + int work_done = 0; + struct qede_fastpath *fp = container_of(napi, struct qede_fastpath, + napi); + struct qede_dev *edev = fp->edev; + + while (1) { + u8 tc; + + for (tc = 0; tc < edev->num_tc; tc++) + if (qede_txq_has_work(&fp->txqs[tc])) + qede_tx_int(edev, &fp->txqs[tc]); + + if (qede_has_rx_work(fp->rxq)) { + work_done += qede_rx_int(fp, budget - work_done); + + /* must not complete if we consumed full budget */ + if (work_done >= budget) + break; + } + + /* Fall out from the NAPI loop if needed */ + if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) { + qed_sb_update_sb_idx(fp->sb_info); + /* *_has_*_work() reads the status block, + * thus we need to ensure that status block indices + * have been actually read (qed_sb_update_sb_idx) + * prior to this check (*_has_*_work) so that + * we won't write the "newer" value of the status block + * to HW (if there was a DMA right after + * qede_has_rx_work and if there is no rmb, the memory + * reading (qed_sb_update_sb_idx) may be postponed + * to right before *_ack_sb). In this case there + * will never be another interrupt until there is + * another update of the status block, while there + * is still unhandled work. 
+ */ + rmb(); + + if (!(qede_has_rx_work(fp->rxq) || + qede_has_tx_work(fp))) { + napi_complete(napi); + /* Update and reenable interrupts */ + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, + 1 /*update*/); + break; + } + } + } + + return work_done; +} + +static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie) +{ + struct qede_fastpath *fp = fp_cookie; + + qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); + + napi_schedule_irqoff(&fp->napi); + return IRQ_HANDLED; +} + +/* ------------------------------------------------------------------------- + * END OF FAST-PATH + * ------------------------------------------------------------------------- + */ + +static int qede_open(struct net_device *ndev); +static int qede_close(struct net_device *ndev); +static int qede_set_mac_addr(struct net_device *ndev, void *p); +static void qede_set_rx_mode(struct net_device *ndev); +static void qede_config_rx_mode(struct net_device *ndev); + +static int qede_set_ucast_rx_mac(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + unsigned char mac[ETH_ALEN]) +{ + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.mac_valid = 1; + ether_addr_copy(filter_cmd.filter.ucast.mac, mac); + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +void qede_fill_by_demand_stats(struct qede_dev *edev) +{ + struct qed_eth_stats stats; + + edev->ops->get_vport_stats(edev->cdev, &stats); + edev->stats.no_buff_discards = stats.no_buff_discards; + edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes; + edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes; + edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes; + edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts; + edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts; + edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts; + edev->stats.mftag_filter_discards = stats.mftag_filter_discards; + edev->stats.mac_filter_discards = stats.mac_filter_discards; + + edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes; + edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes; + edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes; + edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts; + edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts; + edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts; + edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts; + edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts; + edev->stats.coalesced_events = stats.tpa_coalesced_events; + edev->stats.coalesced_aborts_num = stats.tpa_aborts_num; + edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts; + edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes; + + edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets; + edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets; + edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets; + edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets; + edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets; + edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets; + edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets; + edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets; + edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets; + edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets; + edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets; + edev->stats.rx_crc_errors = stats.rx_crc_errors; + edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames; + 
edev->stats.rx_pause_frames = stats.rx_pause_frames; + edev->stats.rx_pfc_frames = stats.rx_pfc_frames; + edev->stats.rx_align_errors = stats.rx_align_errors; + edev->stats.rx_carrier_errors = stats.rx_carrier_errors; + edev->stats.rx_oversize_packets = stats.rx_oversize_packets; + edev->stats.rx_jabbers = stats.rx_jabbers; + edev->stats.rx_undersize_packets = stats.rx_undersize_packets; + edev->stats.rx_fragments = stats.rx_fragments; + edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets; + edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets; + edev->stats.tx_128_to_255_byte_packets = + stats.tx_128_to_255_byte_packets; + edev->stats.tx_256_to_511_byte_packets = + stats.tx_256_to_511_byte_packets; + edev->stats.tx_512_to_1023_byte_packets = + stats.tx_512_to_1023_byte_packets; + edev->stats.tx_1024_to_1518_byte_packets = + stats.tx_1024_to_1518_byte_packets; + edev->stats.tx_1519_to_2047_byte_packets = + stats.tx_1519_to_2047_byte_packets; + edev->stats.tx_2048_to_4095_byte_packets = + stats.tx_2048_to_4095_byte_packets; + edev->stats.tx_4096_to_9216_byte_packets = + stats.tx_4096_to_9216_byte_packets; + edev->stats.tx_9217_to_16383_byte_packets = + stats.tx_9217_to_16383_byte_packets; + edev->stats.tx_pause_frames = stats.tx_pause_frames; + edev->stats.tx_pfc_frames = stats.tx_pfc_frames; + edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count; + edev->stats.tx_total_collisions = stats.tx_total_collisions; + edev->stats.brb_truncates = stats.brb_truncates; + edev->stats.brb_discards = stats.brb_discards; + edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; +} + +static struct rtnl_link_stats64 *qede_get_stats64( + struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct qede_dev *edev = netdev_priv(dev); + + qede_fill_by_demand_stats(edev); + + stats->rx_packets = edev->stats.rx_ucast_pkts + + edev->stats.rx_mcast_pkts + + edev->stats.rx_bcast_pkts; + stats->tx_packets = edev->stats.tx_ucast_pkts + + edev->stats.tx_mcast_pkts + + edev->stats.tx_bcast_pkts; + + stats->rx_bytes = edev->stats.rx_ucast_bytes + + edev->stats.rx_mcast_bytes + + edev->stats.rx_bcast_bytes; + + stats->tx_bytes = edev->stats.tx_ucast_bytes + + edev->stats.tx_mcast_bytes + + edev->stats.tx_bcast_bytes; + + stats->tx_errors = edev->stats.tx_err_drop_pkts; + stats->multicast = edev->stats.rx_mcast_pkts + + edev->stats.rx_bcast_pkts; + + stats->rx_fifo_errors = edev->stats.no_buff_discards; + + stats->collisions = edev->stats.tx_total_collisions; + stats->rx_crc_errors = edev->stats.rx_crc_errors; + stats->rx_frame_errors = edev->stats.rx_align_errors; + + return stats; +} + +static const struct net_device_ops qede_netdev_ops = { + .ndo_open = qede_open, + .ndo_stop = qede_close, + .ndo_start_xmit = qede_start_xmit, + .ndo_set_rx_mode = qede_set_rx_mode, + .ndo_set_mac_address = qede_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = qede_change_mtu, + .ndo_get_stats64 = qede_get_stats64, +}; + +/* ------------------------------------------------------------------------- + * START OF PROBE / REMOVE + * ------------------------------------------------------------------------- + */ + +static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, + struct pci_dev *pdev, + struct qed_dev_eth_info *info, + u32 dp_module, + u8 dp_level) +{ + struct net_device *ndev; + struct qede_dev *edev; + + ndev = alloc_etherdev_mqs(sizeof(*edev), + info->num_queues, + info->num_queues); + if (!ndev) { + pr_err("etherdev allocation failed\n"); + return 
NULL; + } + + edev = netdev_priv(ndev); + edev->ndev = ndev; + edev->cdev = cdev; + edev->pdev = pdev; + edev->dp_module = dp_module; + edev->dp_level = dp_level; + edev->ops = qed_ops; + edev->q_num_rx_buffers = NUM_RX_BDS_DEF; + edev->q_num_tx_buffers = NUM_TX_BDS_DEF; + + DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n"); + + SET_NETDEV_DEV(ndev, &pdev->dev); + + memset(&edev->stats, 0, sizeof(edev->stats)); + memcpy(&edev->dev_info, info, sizeof(*info)); + + edev->num_tc = edev->dev_info.num_tc; + + return edev; +} + +static void qede_init_ndev(struct qede_dev *edev) +{ + struct net_device *ndev = edev->ndev; + struct pci_dev *pdev = edev->pdev; + u32 hw_features; + + pci_set_drvdata(pdev, ndev); + + ndev->mem_start = edev->dev_info.common.pci_mem_start; + ndev->base_addr = ndev->mem_start; + ndev->mem_end = edev->dev_info.common.pci_mem_end; + ndev->irq = edev->dev_info.common.pci_irq; + + ndev->watchdog_timeo = TX_TIMEOUT; + + ndev->netdev_ops = &qede_netdev_ops; + + qede_set_ethtool_ops(ndev); + + /* user-changeable features */ + hw_features = NETIF_F_GRO | NETIF_F_SG | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; + + ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | + NETIF_F_HIGHDMA; + ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA | + NETIF_F_HW_VLAN_CTAG_TX; + + ndev->hw_features = hw_features; + + /* Set network device HW mac */ + ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac); +} + +/* This function converts from 32b param to two params of level and module + * Input 32b decoding: + * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the + * 'happy' flow, e.g. memory allocation failed. + * b30 - enable all INFO prints. INFO prints are for major steps in the flow + * and provide important parameters. + * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that + * module. VERBOSE prints are for tracking the specific flow in low level. + * + * Notice that the level should be that of the lowest required logs.
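 *
 * Editor's worked example (illustrative only, not part of the original patch
 * comment; it assumes QED_LOG_NOTICE_MASK, QED_LOG_INFO_MASK and
 * QED_LOG_VERBOSE_MASK map to b31, b30 and b29-b0 as described above):
 *   debug = 0x80000000 -> dp_level = NOTICE,  dp_module = 0
 *   debug = 0x40000000 -> dp_level = INFO,    dp_module = 0
 *   debug = 0x00000005 -> dp_level = VERBOSE, dp_module enables modules 0 and 2
 * Any bit set in b29-b0 selects VERBOSE regardless of b31/b30, because
 * qede_config_debug() below tests the VERBOSE mask first.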
+ */ +void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level) +{ + *p_dp_level = QED_LEVEL_NOTICE; + *p_dp_module = 0; + + if (debug & QED_LOG_VERBOSE_MASK) { + *p_dp_level = QED_LEVEL_VERBOSE; + *p_dp_module = (debug & 0x3FFFFFFF); + } else if (debug & QED_LOG_INFO_MASK) { + *p_dp_level = QED_LEVEL_INFO; + } else if (debug & QED_LOG_NOTICE_MASK) { + *p_dp_level = QED_LEVEL_NOTICE; + } +} + +static void qede_free_fp_array(struct qede_dev *edev) +{ + if (edev->fp_array) { + struct qede_fastpath *fp; + int i; + + for_each_rss(i) { + fp = &edev->fp_array[i]; + + kfree(fp->sb_info); + kfree(fp->rxq); + kfree(fp->txqs); + } + kfree(edev->fp_array); + } + edev->num_rss = 0; +} + +static int qede_alloc_fp_array(struct qede_dev *edev) +{ + struct qede_fastpath *fp; + int i; + + edev->fp_array = kcalloc(QEDE_RSS_CNT(edev), + sizeof(*edev->fp_array), GFP_KERNEL); + if (!edev->fp_array) { + DP_NOTICE(edev, "fp array allocation failed\n"); + goto err; + } + + for_each_rss(i) { + fp = &edev->fp_array[i]; + + fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); + if (!fp->sb_info) { + DP_NOTICE(edev, "sb info struct allocation failed\n"); + goto err; + } + + fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL); + if (!fp->rxq) { + DP_NOTICE(edev, "RXQ struct allocation failed\n"); + goto err; + } + + fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL); + if (!fp->txqs) { + DP_NOTICE(edev, "TXQ array allocation failed\n"); + goto err; + } + } + + return 0; +err: + qede_free_fp_array(edev); + return -ENOMEM; +} + +static void qede_sp_task(struct work_struct *work) +{ + struct qede_dev *edev = container_of(work, struct qede_dev, + sp_task.work); + mutex_lock(&edev->qede_lock); + + if (edev->state == QEDE_STATE_OPEN) { + if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) + qede_config_rx_mode(edev->ndev); + } + + mutex_unlock(&edev->qede_lock); +} + +static void qede_update_pf_params(struct qed_dev *cdev) +{ + struct qed_pf_params pf_params; + + /* 16 rx + 16 tx */ + memset(&pf_params, 0, sizeof(struct qed_pf_params)); + pf_params.eth_pf_params.num_cons = 32; + qed_ops->common->update_pf_params(cdev, &pf_params); +} + +enum qede_probe_mode { + QEDE_PROBE_NORMAL, +}; + +static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, + enum qede_probe_mode mode) +{ + struct qed_slowpath_params params; + struct qed_dev_eth_info dev_info; + struct qede_dev *edev; + struct qed_dev *cdev; + int rc; + + if (unlikely(dp_level & QED_LEVEL_INFO)) + pr_notice("Starting qede probe\n"); + + cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH, + dp_module, dp_level); + if (!cdev) { + rc = -ENODEV; + goto err0; + } + + qede_update_pf_params(cdev); + + /* Start the Slowpath-process */ + memset(¶ms, 0, sizeof(struct qed_slowpath_params)); + params.int_mode = QED_INT_MODE_MSIX; + params.drv_major = QEDE_MAJOR_VERSION; + params.drv_minor = QEDE_MINOR_VERSION; + params.drv_rev = QEDE_REVISION_VERSION; + params.drv_eng = QEDE_ENGINEERING_VERSION; + strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE); + rc = qed_ops->common->slowpath_start(cdev, ¶ms); + if (rc) { + pr_notice("Cannot start slowpath\n"); + goto err1; + } + + /* Learn information crucial for qede to progress */ + rc = qed_ops->fill_dev_info(cdev, &dev_info); + if (rc) + goto err2; + + edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, + dp_level); + if (!edev) { + rc = -ENOMEM; + goto err2; + } + + qede_init_ndev(edev); + + rc = register_netdev(edev->ndev); + if (rc) { + DP_NOTICE(edev, "Cannot register 
net-device\n"); + goto err3; + } + + edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION); + + edev->ops->register_ops(cdev, &qede_ll_ops, edev); + + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); + mutex_init(&edev->qede_lock); + + DP_INFO(edev, "Ending successfully qede probe\n"); + + return 0; + +err3: + free_netdev(edev->ndev); +err2: + qed_ops->common->slowpath_stop(cdev); +err1: + qed_ops->common->remove(cdev); +err0: + return rc; +} + +static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + u32 dp_module = 0; + u8 dp_level = 0; + + qede_config_debug(debug, &dp_module, &dp_level); + + return __qede_probe(pdev, dp_module, dp_level, + QEDE_PROBE_NORMAL); +} + +enum qede_remove_mode { + QEDE_REMOVE_NORMAL, +}; + +static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct qede_dev *edev = netdev_priv(ndev); + struct qed_dev *cdev = edev->cdev; + + DP_INFO(edev, "Starting qede_remove\n"); + + cancel_delayed_work_sync(&edev->sp_task); + unregister_netdev(ndev); + + edev->ops->common->set_power_state(cdev, PCI_D0); + + pci_set_drvdata(pdev, NULL); + + free_netdev(ndev); + + /* Use global ops since we've freed edev */ + qed_ops->common->slowpath_stop(cdev); + qed_ops->common->remove(cdev); + + pr_notice("Ending successfully qede_remove\n"); +} + +static void qede_remove(struct pci_dev *pdev) +{ + __qede_remove(pdev, QEDE_REMOVE_NORMAL); +} + +/* ------------------------------------------------------------------------- + * START OF LOAD / UNLOAD + * ------------------------------------------------------------------------- + */ + +static int qede_set_num_queues(struct qede_dev *edev) +{ + int rc; + u16 rss_num; + + /* Setup queues according to possible resources*/ + rss_num = netif_get_num_default_rss_queues() * + edev->dev_info.common.num_hwfns; + + rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num); + + rc = edev->ops->common->set_fp_int(edev->cdev, rss_num); + if (rc > 0) { + /* Managed to request interrupts for our queues */ + edev->num_rss = rc; + DP_INFO(edev, "Managed %d [of %d] RSS queues\n", + QEDE_RSS_CNT(edev), rss_num); + rc = 0; + } + return rc; +} + +static void qede_free_mem_sb(struct qede_dev *edev, + struct qed_sb_info *sb_info) +{ + if (sb_info->sb_virt) + dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt), + (void *)sb_info->sb_virt, sb_info->sb_phys); +} + +/* This function allocates fast-path status block memory */ +static int qede_alloc_mem_sb(struct qede_dev *edev, + struct qed_sb_info *sb_info, + u16 sb_id) +{ + struct status_block *sb_virt; + dma_addr_t sb_phys; + int rc; + + sb_virt = dma_alloc_coherent(&edev->pdev->dev, + sizeof(*sb_virt), + &sb_phys, GFP_KERNEL); + if (!sb_virt) { + DP_ERR(edev, "Status block allocation failed\n"); + return -ENOMEM; + } + + rc = edev->ops->common->sb_init(edev->cdev, sb_info, + sb_virt, sb_phys, sb_id, + QED_SB_TYPE_L2_QUEUE); + if (rc) { + DP_ERR(edev, "Status block initialization failed\n"); + dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt), + sb_virt, sb_phys); + return rc; + } + + return 0; +} + +static void qede_free_rx_buffers(struct qede_dev *edev, + struct qede_rx_queue *rxq) +{ + u16 i; + + for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) { + struct sw_rx_data *rx_buf; + u8 *data; + + rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX]; + data = rx_buf->data; + + dma_unmap_single(&edev->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + rxq->rx_buf_size, DMA_FROM_DEVICE); + + rx_buf->data 
= NULL; + kfree(data); + } +} + +static void qede_free_mem_rxq(struct qede_dev *edev, + struct qede_rx_queue *rxq) +{ + /* Free rx buffers */ + qede_free_rx_buffers(edev, rxq); + + /* Free the parallel SW ring */ + kfree(rxq->sw_rx_ring); + + /* Free the real RQ ring used by FW */ + edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring); + edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring); +} + +static int qede_alloc_rx_buffer(struct qede_dev *edev, + struct qede_rx_queue *rxq) +{ + struct sw_rx_data *sw_rx_data; + struct eth_rx_bd *rx_bd; + dma_addr_t mapping; + u16 rx_buf_size; + u8 *data; + + rx_buf_size = rxq->rx_buf_size; + + data = kmalloc(rx_buf_size, GFP_ATOMIC); + if (unlikely(!data)) { + DP_NOTICE(edev, "Failed to allocate Rx data\n"); + return -ENOMEM; + } + + mapping = dma_map_single(&edev->pdev->dev, data, + rx_buf_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { + kfree(data); + DP_NOTICE(edev, "Failed to map Rx buffer\n"); + return -ENOMEM; + } + + sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + sw_rx_data->data = data; + + dma_unmap_addr_set(sw_rx_data, mapping, mapping); + + /* Advance PROD and get BD pointer */ + rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); + WARN_ON(!rx_bd); + rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); + rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); + + rxq->sw_rx_prod++; + + return 0; +} + +/* This function allocates all memory needed per Rx queue */ +static int qede_alloc_mem_rxq(struct qede_dev *edev, + struct qede_rx_queue *rxq) +{ + int i, rc, size, num_allocated; + + rxq->num_rx_buffers = edev->q_num_rx_buffers; + + rxq->rx_buf_size = NET_IP_ALIGN + + ETH_OVERHEAD + + edev->ndev->mtu + + QEDE_FW_RX_ALIGN_END; + + /* Allocate the parallel driver ring for Rx buffers */ + size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX; + rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); + if (!rxq->sw_rx_ring) { + DP_ERR(edev, "Rx buffers ring allocation failed\n"); + goto err; + } + + /* Allocate FW Rx ring */ + rc = edev->ops->common->chain_alloc(edev->cdev, + QED_CHAIN_USE_TO_CONSUME_PRODUCE, + QED_CHAIN_MODE_NEXT_PTR, + NUM_RX_BDS_MAX, + sizeof(struct eth_rx_bd), + &rxq->rx_bd_ring); + + if (rc) + goto err; + + /* Allocate FW completion ring */ + rc = edev->ops->common->chain_alloc(edev->cdev, + QED_CHAIN_USE_TO_CONSUME, + QED_CHAIN_MODE_PBL, + NUM_RX_BDS_MAX, + sizeof(union eth_rx_cqe), + &rxq->rx_comp_ring); + if (rc) + goto err; + + /* Allocate buffers for the Rx ring */ + for (i = 0; i < rxq->num_rx_buffers; i++) { + rc = qede_alloc_rx_buffer(edev, rxq); + if (rc) + break; + } + num_allocated = i; + if (!num_allocated) { + DP_ERR(edev, "Rx buffers allocation failed\n"); + goto err; + } else if (num_allocated < rxq->num_rx_buffers) { + DP_NOTICE(edev, + "Allocated less buffers than desired (%d allocated)\n", + num_allocated); + } + + return 0; + +err: + qede_free_mem_rxq(edev, rxq); + return -ENOMEM; +} + +static void qede_free_mem_txq(struct qede_dev *edev, + struct qede_tx_queue *txq) +{ + /* Free the parallel SW ring */ + kfree(txq->sw_tx_ring); + + /* Free the real RQ ring used by FW */ + edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl); +} + +/* This function allocates all memory needed per Tx queue */ +static int qede_alloc_mem_txq(struct qede_dev *edev, + struct qede_tx_queue *txq) +{ + int size, rc; + union eth_tx_bd_types *p_virt; + + txq->num_tx_buffers = edev->q_num_tx_buffers; + + /* Allocate the parallel driver ring for Tx buffers 
*/ + size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX; + txq->sw_tx_ring = kzalloc(size, GFP_KERNEL); + if (!txq->sw_tx_ring) { + DP_NOTICE(edev, "Tx buffers ring allocation failed\n"); + goto err; + } + + rc = edev->ops->common->chain_alloc(edev->cdev, + QED_CHAIN_USE_TO_CONSUME_PRODUCE, + QED_CHAIN_MODE_PBL, + NUM_TX_BDS_MAX, + sizeof(*p_virt), + &txq->tx_pbl); + if (rc) + goto err; + + return 0; + +err: + qede_free_mem_txq(edev, txq); + return -ENOMEM; +} + +/* This function frees all memory of a single fp */ +static void qede_free_mem_fp(struct qede_dev *edev, + struct qede_fastpath *fp) +{ + int tc; + + qede_free_mem_sb(edev, fp->sb_info); + + qede_free_mem_rxq(edev, fp->rxq); + + for (tc = 0; tc < edev->num_tc; tc++) + qede_free_mem_txq(edev, &fp->txqs[tc]); +} + +/* This function allocates all memory needed for a single fp (i.e. an entity + * which contains status block, one rx queue and multiple per-TC tx queues. + */ +static int qede_alloc_mem_fp(struct qede_dev *edev, + struct qede_fastpath *fp) +{ + int rc, tc; + + rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id); + if (rc) + goto err; + + rc = qede_alloc_mem_rxq(edev, fp->rxq); + if (rc) + goto err; + + for (tc = 0; tc < edev->num_tc; tc++) { + rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); + if (rc) + goto err; + } + + return 0; + +err: + qede_free_mem_fp(edev, fp); + return -ENOMEM; +} + +static void qede_free_mem_load(struct qede_dev *edev) +{ + int i; + + for_each_rss(i) { + struct qede_fastpath *fp = &edev->fp_array[i]; + + qede_free_mem_fp(edev, fp); + } +} + +/* This function allocates all qede memory at NIC load. */ +static int qede_alloc_mem_load(struct qede_dev *edev) +{ + int rc = 0, rss_id; + + for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) { + struct qede_fastpath *fp = &edev->fp_array[rss_id]; + + rc = qede_alloc_mem_fp(edev, fp); + if (rc) + break; + } + + if (rss_id != QEDE_RSS_CNT(edev)) { + /* Failed allocating memory for all the queues */ + if (!rss_id) { + DP_ERR(edev, + "Failed to allocate memory for the leading queue\n"); + rc = -ENOMEM; + } else { + DP_NOTICE(edev, + "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n", + QEDE_RSS_CNT(edev), rss_id); + } + edev->num_rss = rss_id; + } + + return 0; +} + +/* This function inits fp content and resets the SB, RXQ and TXQ structures */ +static void qede_init_fp(struct qede_dev *edev) +{ + int rss_id, txq_index, tc; + struct qede_fastpath *fp; + + for_each_rss(rss_id) { + fp = &edev->fp_array[rss_id]; + + fp->edev = edev; + fp->rss_id = rss_id; + + memset((void *)&fp->napi, 0, sizeof(fp->napi)); + + memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info)); + + memset((void *)fp->rxq, 0, sizeof(*fp->rxq)); + fp->rxq->rxq_id = rss_id; + + memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs))); + for (tc = 0; tc < edev->num_tc; tc++) { + txq_index = tc * QEDE_RSS_CNT(edev) + rss_id; + fp->txqs[tc].index = txq_index; + } + + snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", + edev->ndev->name, rss_id); + } +} + +static int qede_set_real_num_queues(struct qede_dev *edev) +{ + int rc = 0; + + rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev)); + if (rc) { + DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); + return rc; + } + rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev)); + if (rc) { + DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); + return rc; + } + + return 0; +} + +static void qede_napi_disable_remove(struct qede_dev *edev) +{ + int i; + + 
for_each_rss(i) { + napi_disable(&edev->fp_array[i].napi); + + netif_napi_del(&edev->fp_array[i].napi); + } +} + +static void qede_napi_add_enable(struct qede_dev *edev) +{ + int i; + + /* Add NAPI objects */ + for_each_rss(i) { + netif_napi_add(edev->ndev, &edev->fp_array[i].napi, + qede_poll, NAPI_POLL_WEIGHT); + napi_enable(&edev->fp_array[i].napi); + } +} + +static void qede_sync_free_irqs(struct qede_dev *edev) +{ + int i; + + for (i = 0; i < edev->int_info.used_cnt; i++) { + if (edev->int_info.msix_cnt) { + synchronize_irq(edev->int_info.msix[i].vector); + free_irq(edev->int_info.msix[i].vector, + &edev->fp_array[i]); + } else { + edev->ops->common->simd_handler_clean(edev->cdev, i); + } + } + + edev->int_info.used_cnt = 0; +} + +static int qede_req_msix_irqs(struct qede_dev *edev) +{ + int i, rc; + + /* Sanitize number of interrupts == number of prepared RSS queues */ + if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) { + DP_ERR(edev, + "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n", + QEDE_RSS_CNT(edev), edev->int_info.msix_cnt); + return -EINVAL; + } + + for (i = 0; i < QEDE_RSS_CNT(edev); i++) { + rc = request_irq(edev->int_info.msix[i].vector, + qede_msix_fp_int, 0, edev->fp_array[i].name, + &edev->fp_array[i]); + if (rc) { + DP_ERR(edev, "Request fp %d irq failed\n", i); + qede_sync_free_irqs(edev); + return rc; + } + DP_VERBOSE(edev, NETIF_MSG_INTR, + "Requested fp irq for %s [entry %d]. Cookie is at %p\n", + edev->fp_array[i].name, i, + &edev->fp_array[i]); + edev->int_info.used_cnt++; + } + + return 0; +} + +static void qede_simd_fp_handler(void *cookie) +{ + struct qede_fastpath *fp = (struct qede_fastpath *)cookie; + + napi_schedule_irqoff(&fp->napi); +} + +static int qede_setup_irqs(struct qede_dev *edev) +{ + int i, rc = 0; + + /* Learn Interrupt configuration */ + rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info); + if (rc) + return rc; + + if (edev->int_info.msix_cnt) { + rc = qede_req_msix_irqs(edev); + if (rc) + return rc; + edev->ndev->irq = edev->int_info.msix[0].vector; + } else { + const struct qed_common_ops *ops; + + /* qed should learn receive the RSS ids and callbacks */ + ops = edev->ops->common; + for (i = 0; i < QEDE_RSS_CNT(edev); i++) + ops->simd_handler_config(edev->cdev, + &edev->fp_array[i], i, + qede_simd_fp_handler); + edev->int_info.used_cnt = QEDE_RSS_CNT(edev); + } + return 0; +} + +static int qede_drain_txq(struct qede_dev *edev, + struct qede_tx_queue *txq, + bool allow_drain) +{ + int rc, cnt = 1000; + + while (txq->sw_tx_cons != txq->sw_tx_prod) { + if (!cnt) { + if (allow_drain) { + DP_NOTICE(edev, + "Tx queue[%d] is stuck, requesting MCP to drain\n", + txq->index); + rc = edev->ops->common->drain(edev->cdev); + if (rc) + return rc; + return qede_drain_txq(edev, txq, false); + } + DP_NOTICE(edev, + "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n", + txq->index, txq->sw_tx_prod, + txq->sw_tx_cons); + return -ENODEV; + } + cnt--; + usleep_range(1000, 2000); + barrier(); + } + + /* FW finished processing, wait for HW to transmit all tx packets */ + usleep_range(1000, 2000); + + return 0; +} + +static int qede_stop_queues(struct qede_dev *edev) +{ + struct qed_update_vport_params vport_update_params; + struct qed_dev *cdev = edev->cdev; + int rc, tc, i; + + /* Disable the vport */ + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.vport_id = 0; + vport_update_params.update_vport_active_flg = 1; + vport_update_params.vport_active_flg = 0; + vport_update_params.update_rss_flg = 0; + 
+ rc = edev->ops->vport_update(cdev, &vport_update_params); + if (rc) { + DP_ERR(edev, "Failed to update vport\n"); + return rc; + } + + /* Flush Tx queues. If needed, request drain from MCP */ + for_each_rss(i) { + struct qede_fastpath *fp = &edev->fp_array[i]; + + for (tc = 0; tc < edev->num_tc; tc++) { + struct qede_tx_queue *txq = &fp->txqs[tc]; + + rc = qede_drain_txq(edev, txq, true); + if (rc) + return rc; + } + } + + /* Stop all Queues in reverse order*/ + for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) { + struct qed_stop_rxq_params rx_params; + + /* Stop the Tx Queue(s)*/ + for (tc = 0; tc < edev->num_tc; tc++) { + struct qed_stop_txq_params tx_params; + + tx_params.rss_id = i; + tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i; + rc = edev->ops->q_tx_stop(cdev, &tx_params); + if (rc) { + DP_ERR(edev, "Failed to stop TXQ #%d\n", + tx_params.tx_queue_id); + return rc; + } + } + + /* Stop the Rx Queue*/ + memset(&rx_params, 0, sizeof(rx_params)); + rx_params.rss_id = i; + rx_params.rx_queue_id = i; + + rc = edev->ops->q_rx_stop(cdev, &rx_params); + if (rc) { + DP_ERR(edev, "Failed to stop RXQ #%d\n", i); + return rc; + } + } + + /* Stop the vport */ + rc = edev->ops->vport_stop(cdev, 0); + if (rc) + DP_ERR(edev, "Failed to stop VPORT\n"); + + return rc; +} + +static int qede_start_queues(struct qede_dev *edev) +{ + int rc, tc, i; + int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1; + struct qed_dev *cdev = edev->cdev; + struct qed_update_vport_rss_params *rss_params = &edev->rss_params; + struct qed_update_vport_params vport_update_params; + struct qed_queue_start_common_params q_params; + + if (!edev->num_rss) { + DP_ERR(edev, + "Cannot update V-VPORT as active as there are no Rx queues\n"); + return -EINVAL; + } + + rc = edev->ops->vport_start(cdev, vport_id, + edev->ndev->mtu, + drop_ttl0_flg, + vlan_removal_en); + + if (rc) { + DP_ERR(edev, "Start V-PORT failed %d\n", rc); + return rc; + } + + DP_VERBOSE(edev, NETIF_MSG_IFUP, + "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", + vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); + + for_each_rss(i) { + struct qede_fastpath *fp = &edev->fp_array[i]; + dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table; + + memset(&q_params, 0, sizeof(q_params)); + q_params.rss_id = i; + q_params.queue_id = i; + q_params.vport_id = 0; + q_params.sb = fp->sb_info->igu_sb_id; + q_params.sb_idx = RX_PI; + + rc = edev->ops->q_rx_start(cdev, &q_params, + fp->rxq->rx_buf_size, + fp->rxq->rx_bd_ring.p_phys_addr, + phys_table, + fp->rxq->rx_comp_ring.page_cnt, + &fp->rxq->hw_rxq_prod_addr); + if (rc) { + DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); + return rc; + } + + fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI]; + + qede_update_rx_prod(edev, fp->rxq); + + for (tc = 0; tc < edev->num_tc; tc++) { + struct qede_tx_queue *txq = &fp->txqs[tc]; + int txq_index = tc * QEDE_RSS_CNT(edev) + i; + + memset(&q_params, 0, sizeof(q_params)); + q_params.rss_id = i; + q_params.queue_id = txq_index; + q_params.vport_id = 0; + q_params.sb = fp->sb_info->igu_sb_id; + q_params.sb_idx = TX_PI(tc); + + rc = edev->ops->q_tx_start(cdev, &q_params, + txq->tx_pbl.pbl.p_phys_table, + txq->tx_pbl.page_cnt, + &txq->doorbell_addr); + if (rc) { + DP_ERR(edev, "Start TXQ #%d failed %d\n", + txq_index, rc); + return rc; + } + + txq->hw_cons_ptr = + &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; + SET_FIELD(txq->tx_db.data.params, + ETH_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 
+ DB_AGG_CMD_SET); + SET_FIELD(txq->tx_db.data.params, + ETH_DB_DATA_AGG_VAL_SEL, + DQ_XCM_ETH_TX_BD_PROD_CMD); + + txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; + } + } + + /* Prepare and send the vport enable */ + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.vport_id = vport_id; + vport_update_params.update_vport_active_flg = 1; + vport_update_params.vport_active_flg = 1; + + /* Fill struct with RSS params */ + if (QEDE_RSS_CNT(edev) > 1) { + vport_update_params.update_rss_flg = 1; + for (i = 0; i < 128; i++) + rss_params->rss_ind_table[i] = + ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev)); + netdev_rss_key_fill(rss_params->rss_key, + sizeof(rss_params->rss_key)); + } else { + memset(rss_params, 0, sizeof(*rss_params)); + } + memcpy(&vport_update_params.rss_params, rss_params, + sizeof(*rss_params)); + + rc = edev->ops->vport_update(cdev, &vport_update_params); + if (rc) { + DP_ERR(edev, "Update V-PORT failed %d\n", rc); + return rc; + } + + return 0; +} + +static int qede_set_mcast_rx_mac(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + unsigned char *mac, int num_macs) +{ + struct qed_filter_params filter_cmd; + int i; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_MCAST; + filter_cmd.filter.mcast.type = opcode; + filter_cmd.filter.mcast.num = num_macs; + + for (i = 0; i < num_macs; i++, mac += ETH_ALEN) + ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +enum qede_unload_mode { + QEDE_UNLOAD_NORMAL, +}; + +static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode) +{ + struct qed_link_params link_params; + int rc; + + DP_INFO(edev, "Starting qede unload\n"); + + mutex_lock(&edev->qede_lock); + edev->state = QEDE_STATE_CLOSED; + + /* Close OS Tx */ + netif_tx_disable(edev->ndev); + netif_carrier_off(edev->ndev); + + /* Reset the link */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = false; + edev->ops->common->set_link(edev->cdev, &link_params); + rc = qede_stop_queues(edev); + if (rc) { + qede_sync_free_irqs(edev); + goto out; + } + + DP_INFO(edev, "Stopped Queues\n"); + + edev->ops->fastpath_stop(edev->cdev); + + /* Release the interrupts */ + qede_sync_free_irqs(edev); + edev->ops->common->set_fp_int(edev->cdev, 0); + + qede_napi_disable_remove(edev); + + qede_free_mem_load(edev); + qede_free_fp_array(edev); + +out: + mutex_unlock(&edev->qede_lock); + DP_INFO(edev, "Ending qede unload\n"); +} + +enum qede_load_mode { + QEDE_LOAD_NORMAL, +}; + +static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) +{ + struct qed_link_params link_params; + struct qed_link_output link_output; + int rc; + + DP_INFO(edev, "Starting qede load\n"); + + rc = qede_set_num_queues(edev); + if (rc) + goto err0; + + rc = qede_alloc_fp_array(edev); + if (rc) + goto err0; + + qede_init_fp(edev); + + rc = qede_alloc_mem_load(edev); + if (rc) + goto err1; + DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", + QEDE_RSS_CNT(edev), edev->num_tc); + + rc = qede_set_real_num_queues(edev); + if (rc) + goto err2; + + qede_napi_add_enable(edev); + DP_INFO(edev, "Napi added and enabled\n"); + + rc = qede_setup_irqs(edev); + if (rc) + goto err3; + DP_INFO(edev, "Setup IRQs succeeded\n"); + + rc = qede_start_queues(edev); + if (rc) + goto err4; + DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); + + /* Add primary mac and set Rx filters */ + ether_addr_copy(edev->primary_mac, 
edev->ndev->dev_addr); + + mutex_lock(&edev->qede_lock); + edev->state = QEDE_STATE_OPEN; + mutex_unlock(&edev->qede_lock); + + /* Ask for link-up using current configuration */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = true; + edev->ops->common->set_link(edev->cdev, &link_params); + + /* Query whether link is already-up */ + memset(&link_output, 0, sizeof(link_output)); + edev->ops->common->get_link(edev->cdev, &link_output); + qede_link_update(edev, &link_output); + + DP_INFO(edev, "Ending successfully qede load\n"); + + return 0; + +err4: + qede_sync_free_irqs(edev); + memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); +err3: + qede_napi_disable_remove(edev); +err2: + qede_free_mem_load(edev); +err1: + edev->ops->common->set_fp_int(edev->cdev, 0); + qede_free_fp_array(edev); + edev->num_rss = 0; +err0: + return rc; +} + +void qede_reload(struct qede_dev *edev, + void (*func)(struct qede_dev *, union qede_reload_args *), + union qede_reload_args *args) +{ + qede_unload(edev, QEDE_UNLOAD_NORMAL); + /* Call function handler to update parameters + * needed for function load. + */ + if (func) + func(edev, args); + + qede_load(edev, QEDE_LOAD_NORMAL); + + mutex_lock(&edev->qede_lock); + qede_config_rx_mode(edev->ndev); + mutex_unlock(&edev->qede_lock); +} + +/* called with rtnl_lock */ +static int qede_open(struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + + netif_carrier_off(ndev); + + edev->ops->common->set_power_state(edev->cdev, PCI_D0); + + return qede_load(edev, QEDE_LOAD_NORMAL); +} + +static int qede_close(struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + + qede_unload(edev, QEDE_UNLOAD_NORMAL); + + return 0; +} + +static void qede_link_update(void *dev, struct qed_link_output *link) +{ + struct qede_dev *edev = dev; + + if (!netif_running(edev->ndev)) { + DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n"); + return; + } + + if (link->link_up) { + DP_NOTICE(edev, "Link is up\n"); + netif_tx_start_all_queues(edev->ndev); + netif_carrier_on(edev->ndev); + } else { + DP_NOTICE(edev, "Link is down\n"); + netif_tx_disable(edev->ndev); + netif_carrier_off(edev->ndev); + } +} + +static int qede_set_mac_addr(struct net_device *ndev, void *p) +{ + struct qede_dev *edev = netdev_priv(ndev); + struct sockaddr *addr = p; + int rc; + + ASSERT_RTNL(); /* @@@TBD To be removed */ + + DP_INFO(edev, "Set_mac_addr called\n"); + + if (!is_valid_ether_addr(addr->sa_data)) { + DP_NOTICE(edev, "The MAC address is not valid\n"); + return -EFAULT; + } + + ether_addr_copy(ndev->dev_addr, addr->sa_data); + + if (!netif_running(ndev)) { + DP_NOTICE(edev, "The device is currently down\n"); + return 0; + } + + /* Remove the previous primary mac */ + rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, + edev->primary_mac); + if (rc) + return rc; + + /* Add MAC filter according to the new unicast HW MAC address */ + ether_addr_copy(edev->primary_mac, ndev->dev_addr); + return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, + edev->primary_mac); +} + +static int +qede_configure_mcast_filtering(struct net_device *ndev, + enum qed_filter_rx_mode_type *accept_flags) +{ + struct qede_dev *edev = netdev_priv(ndev); + unsigned char *mc_macs, *temp; + struct netdev_hw_addr *ha; + int rc = 0, mc_count; + size_t size; + + size = 64 * ETH_ALEN; + + mc_macs = kzalloc(size, GFP_KERNEL); + if (!mc_macs) { + DP_NOTICE(edev, + "Failed to allocate memory for multicast MACs\n"); + rc = -ENOMEM; + goto exit; + } + 
+ temp = mc_macs; + + /* Remove all previously configured MAC filters */ + rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, + mc_macs, 1); + if (rc) + goto exit; + + netif_addr_lock_bh(ndev); + + mc_count = netdev_mc_count(ndev); + if (mc_count < 64) { + netdev_for_each_mc_addr(ha, ndev) { + ether_addr_copy(temp, ha->addr); + temp += ETH_ALEN; + } + } + + netif_addr_unlock_bh(ndev); + + /* Check for all multicast @@@TBD resource allocation */ + if ((ndev->flags & IFF_ALLMULTI) || + (mc_count > 64)) { + if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR) + *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; + } else { + /* Add all multicast MAC filters */ + rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, + mc_macs, mc_count); + } + +exit: + kfree(mc_macs); + return rc; +} + +static void qede_set_rx_mode(struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + + DP_INFO(edev, "qede_set_rx_mode called\n"); + + if (edev->state != QEDE_STATE_OPEN) { + DP_INFO(edev, + "qede_set_rx_mode called while interface is down\n"); + } else { + set_bit(QEDE_SP_RX_MODE, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); + } +} + +/* Must be called with qede_lock held */ +static void qede_config_rx_mode(struct net_device *ndev) +{ + enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST; + struct qede_dev *edev = netdev_priv(ndev); + struct qed_filter_params rx_mode; + unsigned char *uc_macs, *temp; + struct netdev_hw_addr *ha; + int rc, uc_count; + size_t size; + + netif_addr_lock_bh(ndev); + + uc_count = netdev_uc_count(ndev); + size = uc_count * ETH_ALEN; + + uc_macs = kzalloc(size, GFP_ATOMIC); + if (!uc_macs) { + DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n"); + netif_addr_unlock_bh(ndev); + return; + } + + temp = uc_macs; + netdev_for_each_uc_addr(ha, ndev) { + ether_addr_copy(temp, ha->addr); + temp += ETH_ALEN; + } + + netif_addr_unlock_bh(ndev); + + /* Configure the struct for the Rx mode */ + memset(&rx_mode, 0, sizeof(struct qed_filter_params)); + rx_mode.type = QED_FILTER_TYPE_RX_MODE; + + /* Remove all previous unicast secondary macs and multicast macs + * (configrue / leave the primary mac) + */ + rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE, + edev->primary_mac); + if (rc) + goto out; + + /* Check for promiscuous */ + if ((ndev->flags & IFF_PROMISC) || + (uc_count > 15)) { /* @@@TBD resource allocation - 1 */ + accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; + } else { + /* Add MAC filters according to the unicast secondary macs */ + int i; + + temp = uc_macs; + for (i = 0; i < uc_count; i++) { + rc = qede_set_ucast_rx_mac(edev, + QED_FILTER_XCAST_TYPE_ADD, + temp); + if (rc) + goto out; + + temp += ETH_ALEN; + } + + rc = qede_configure_mcast_filtering(ndev, &accept_flags); + if (rc) + goto out; + } + + rx_mode.filter.accept_flags = accept_flags; + edev->ops->filter_config(edev->cdev, &rx_mode); +out: + kfree(uc_macs); +} diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 4847713211ca..b09a6b80d107 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -1736,8 +1736,6 @@ static void ql_get_drvinfo(struct net_device *ndev, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = 0; - drvinfo->eedump_len = 0; } static u32 ql_get_msglevel(struct net_device *ndev) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index d6696cfa11d2..46bbea8e023c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1092,7 +1092,7 @@ struct qlcnic_filter_hash { struct qlcnic_mailbox { struct workqueue_struct *work_q; struct qlcnic_adapter *adapter; - struct qlcnic_mbx_ops *ops; + const struct qlcnic_mbx_ops *ops; struct work_struct work; struct completion completion; struct list_head cmd_q; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 9f0bdd993955..37a731be7d39 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -4048,7 +4048,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, work); struct qlcnic_adapter *adapter = mbx->adapter; - struct qlcnic_mbx_ops *mbx_ops = mbx->ops; + const struct qlcnic_mbx_ops *mbx_ops = mbx->ops; struct device *dev = &adapter->pdev->dev; atomic_t *rsp_status = &mbx->rsp_status; struct list_head *head = &mbx->cmd_q; @@ -4098,7 +4098,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) } } -static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = { +static const struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = { .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd, .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd, .decode_resp = qlcnic_83xx_decode_mbx_rsp, diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index c3c514e332b5..5dade1fd08b8 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -415,13 +415,6 @@ static void ql_get_drvinfo(struct net_device *ndev, (qdev->fw_rev_id & 0x000000ff)); strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = 0; - drvinfo->testinfo_len = 0; - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) - drvinfo->regdump_len = sizeof(struct ql_mpi_coredump); - else - drvinfo->regdump_len = sizeof(struct ql_reg_dump); - drvinfo->eedump_len = 0; } static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 78bb4ceb1cdd..ef668d300800 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2388,7 +2388,6 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); - info->regdump_len = tp->regs_len; } static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index eafa907965ec..32a80d2df7ff 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -3672,7 +3672,7 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port, rocker_port->stp_state == BR_STATE_FORWARDING) return 0; - flags |= ROCKER_OP_FLAG_REMOVE; + flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE; spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); @@ -4374,7 +4374,7 @@ static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port, } static int rocker_port_attr_set(struct 
net_device *dev, - struct switchdev_attr *attr, + const struct switchdev_attr *attr, struct switchdev_trans *trans) { struct rocker_port *rocker_port = netdev_priv(dev); @@ -4382,8 +4382,7 @@ static int rocker_port_attr_set(struct net_device *dev, switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - err = rocker_port_stp_update(rocker_port, trans, - ROCKER_OP_FLAG_NOWAIT, + err = rocker_port_stp_update(rocker_port, trans, 0, attr->u.stp_state); break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: @@ -4469,7 +4468,7 @@ static int rocker_port_obj_add(struct net_device *dev, fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); err = rocker_port_fib_ipv4(rocker_port, trans, htonl(fib4->dst), fib4->dst_len, - fib4->fi, fib4->tb_id, 0); + &fib4->fi, fib4->tb_id, 0); break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = rocker_port_fdb_add(rocker_port, trans, @@ -4517,7 +4516,7 @@ static int rocker_port_fdb_del(struct rocker_port *rocker_port, const struct switchdev_obj_port_fdb *fdb) { __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); - int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE; + int flags = ROCKER_OP_FLAG_REMOVE; if (!rocker_port_is_bridged(rocker_port)) return -EINVAL; @@ -4541,7 +4540,7 @@ static int rocker_port_obj_del(struct net_device *dev, fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); err = rocker_port_fib_ipv4(rocker_port, NULL, htonl(fib4->dst), fib4->dst_len, - fib4->fi, fib4->tb_id, + &fib4->fi, fib4->tb_id, ROCKER_OP_FLAG_REMOVE); break; case SWITCHDEV_OBJ_ID_PORT_FDB: @@ -4571,7 +4570,7 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port, hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { if (found->key.rocker_port != rocker_port) continue; - fdb->addr = found->key.addr; + ether_addr_copy(fdb->addr, found->key.addr); fdb->ndm_state = NUD_REACHABLE; fdb->vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 974637d3ae25..6e11ee6173ce 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2062,7 +2062,7 @@ static void efx_init_napi_channel(struct efx_channel *channel) netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, napi_weight); napi_hash_add(&channel->napi_str); - efx_channel_init_lock(channel); + efx_channel_busy_poll_init(channel); } static void efx_init_napi(struct efx_nic *efx) @@ -2125,7 +2125,7 @@ static int efx_busy_poll(struct napi_struct *napi) if (!netif_running(efx->net_dev)) return LL_FLUSH_FAILED; - if (!efx_channel_lock_poll(channel)) + if (!efx_channel_try_lock_poll(channel)) return LL_FLUSH_BUSY; old_rx_packets = channel->rx_queue.rx_packets; diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index ad56231743a6..229e68c89634 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -431,21 +431,8 @@ struct efx_channel { struct net_device *napi_dev; struct napi_struct napi_str; #ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; - spinlock_t state_lock; -#define EFX_CHANNEL_STATE_IDLE 0 -#define EFX_CHANNEL_STATE_NAPI (1 << 0) /* NAPI owns this channel */ -#define EFX_CHANNEL_STATE_POLL (1 << 1) /* poll owns this channel */ -#define EFX_CHANNEL_STATE_DISABLED (1 << 2) /* channel is disabled */ -#define EFX_CHANNEL_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this channel */ -#define EFX_CHANNEL_STATE_POLL_YIELD (1 << 4) /* poll yielded this channel */ -#define EFX_CHANNEL_OWNED \ - (EFX_CHANNEL_STATE_NAPI | 
EFX_CHANNEL_STATE_POLL) -#define EFX_CHANNEL_LOCKED \ - (EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED) -#define EFX_CHANNEL_USER_PEND \ - (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD) -#endif /* CONFIG_NET_RX_BUSY_POLL */ + unsigned long busy_poll_state; +#endif struct efx_special_buffer eventq; unsigned int eventq_mask; unsigned int eventq_read_ptr; @@ -480,98 +467,94 @@ struct efx_channel { }; #ifdef CONFIG_NET_RX_BUSY_POLL -static inline void efx_channel_init_lock(struct efx_channel *channel) +enum efx_channel_busy_poll_state { + EFX_CHANNEL_STATE_IDLE = 0, + EFX_CHANNEL_STATE_NAPI = BIT(0), + EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1, + EFX_CHANNEL_STATE_NAPI_REQ = BIT(1), + EFX_CHANNEL_STATE_POLL_BIT = 2, + EFX_CHANNEL_STATE_POLL = BIT(2), + EFX_CHANNEL_STATE_DISABLE_BIT = 3, +}; + +static inline void efx_channel_busy_poll_init(struct efx_channel *channel) { - spin_lock_init(&channel->state_lock); + WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); } /* Called from the device poll routine to get ownership of a channel. */ static inline bool efx_channel_lock_napi(struct efx_channel *channel) { - bool rc = true; - - spin_lock_bh(&channel->state_lock); - if (channel->state & EFX_CHANNEL_LOCKED) { - WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI); - channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD; - rc = false; - } else { - /* we don't care if someone yielded */ - channel->state = EFX_CHANNEL_STATE_NAPI; + unsigned long prev, old = READ_ONCE(channel->busy_poll_state); + + while (1) { + switch (old) { + case EFX_CHANNEL_STATE_POLL: + /* Ensure efx_channel_try_lock_poll() wont starve us */ + set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT, + &channel->busy_poll_state); + /* fallthrough */ + case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ: + return false; + default: + break; + } + prev = cmpxchg(&channel->busy_poll_state, old, + EFX_CHANNEL_STATE_NAPI); + if (unlikely(prev != old)) { + /* This is likely to mean we've just entered polling + * state. Go back round to set the REQ bit. + */ + old = prev; + continue; + } + return true; } - spin_unlock_bh(&channel->state_lock); - return rc; } static inline void efx_channel_unlock_napi(struct efx_channel *channel) { - spin_lock_bh(&channel->state_lock); - WARN_ON(channel->state & - (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD)); - - channel->state &= EFX_CHANNEL_STATE_DISABLED; - spin_unlock_bh(&channel->state_lock); + /* Make sure write has completed from efx_channel_lock_napi() */ + smp_wmb(); + WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); } /* Called from efx_busy_poll(). */ -static inline bool efx_channel_lock_poll(struct efx_channel *channel) +static inline bool efx_channel_try_lock_poll(struct efx_channel *channel) { - bool rc = true; - - spin_lock_bh(&channel->state_lock); - if ((channel->state & EFX_CHANNEL_LOCKED)) { - channel->state |= EFX_CHANNEL_STATE_POLL_YIELD; - rc = false; - } else { - /* preserve yield marks */ - channel->state |= EFX_CHANNEL_STATE_POLL; - } - spin_unlock_bh(&channel->state_lock); - return rc; + return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE, + EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE; } -/* Returns true if NAPI tried to get the channel while it was locked. 
*/ static inline void efx_channel_unlock_poll(struct efx_channel *channel) { - spin_lock_bh(&channel->state_lock); - WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI); - - /* will reset state to idle, unless channel is disabled */ - channel->state &= EFX_CHANNEL_STATE_DISABLED; - spin_unlock_bh(&channel->state_lock); + clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); } -/* True if a socket is polling, even if it did not get the lock. */ static inline bool efx_channel_busy_polling(struct efx_channel *channel) { - WARN_ON(!(channel->state & EFX_CHANNEL_OWNED)); - return channel->state & EFX_CHANNEL_USER_PEND; + return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); } static inline void efx_channel_enable(struct efx_channel *channel) { - spin_lock_bh(&channel->state_lock); - channel->state = EFX_CHANNEL_STATE_IDLE; - spin_unlock_bh(&channel->state_lock); + clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT, + &channel->busy_poll_state); } -/* False if the channel is currently owned. */ +/* Stop further polling or napi access. + * Returns false if the channel is currently busy polling. + */ static inline bool efx_channel_disable(struct efx_channel *channel) { - bool rc = true; - - spin_lock_bh(&channel->state_lock); - if (channel->state & EFX_CHANNEL_OWNED) - rc = false; - channel->state |= EFX_CHANNEL_STATE_DISABLED; - spin_unlock_bh(&channel->state_lock); - - return rc; + set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state); + /* Implicit barrier in efx_channel_busy_polling() */ + return !efx_channel_busy_polling(channel); } #else /* CONFIG_NET_RX_BUSY_POLL */ -static inline void efx_channel_init_lock(struct efx_channel *channel) +static inline void efx_channel_busy_poll_init(struct efx_channel *channel) { } @@ -584,7 +567,7 @@ static inline void efx_channel_unlock_napi(struct efx_channel *channel) { } -static inline bool efx_channel_lock_poll(struct efx_channel *channel) +static inline bool efx_channel_try_lock_poll(struct efx_channel *channel) { return false; } diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 6ce973187225..062bce9acde6 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4529,9 +4529,6 @@ static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); - info->regdump_len = cp->casreg_len < CAS_MAX_REGS ? - cp->casreg_len : CAS_MAX_REGS; - info->n_stats = CAS_NUM_STAT_KEYS; } static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index a9cac8413e49..14c9d1baa85c 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -2182,11 +2182,6 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(priv->pdev), sizeof(drvinfo->bus_info)); - - drvinfo->n_stats = ((priv->stats_flag) ? 
ARRAY_SIZE(bdx_stat_names) : 0); - drvinfo->testinfo_len = 0; - drvinfo->regdump_len = 0; - drvinfo->eedump_len = 0; } /* diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index cba3d9fcb465..77d26fe286c0 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -899,7 +899,6 @@ static void cpmac_get_drvinfo(struct net_device *dev, strlcpy(info->driver, "cpmac", sizeof(info->driver)); strlcpy(info->version, CPMAC_VERSION, sizeof(info->version)); snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac"); - info->regdump_len = 0; } static const struct ethtool_ops cpmac_ethtool_ops = { diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 75584cc36339..040fbc1e5508 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -31,6 +31,7 @@ #include <linux/pm_runtime.h> #include <linux/gpio.h> #include <linux/of.h> +#include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_device.h> #include <linux/if_vlan.h> @@ -366,6 +367,7 @@ struct cpsw_priv { spinlock_t lock; struct platform_device *pdev; struct net_device *ndev; + struct device_node *phy_node; struct napi_struct napi_rx; struct napi_struct napi_tx; struct device *dev; @@ -1146,7 +1148,11 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); - slave->phy = phy_connect(priv->ndev, slave->data->phy_id, + if (priv->phy_node) + slave->phy = of_phy_connect(priv->ndev, priv->phy_node, + &cpsw_adjust_link, 0, slave->data->phy_if); + else + slave->phy = phy_connect(priv->ndev, slave->data->phy_id, &cpsw_adjust_link, slave->data->phy_if); if (IS_ERR(slave->phy)) { dev_err(priv->dev, "phy %s not found on slave %d\n", @@ -1784,7 +1790,6 @@ static void cpsw_get_drvinfo(struct net_device *ndev, strlcpy(info->driver, "cpsw", sizeof(info->driver)); strlcpy(info->version, "1.0", sizeof(info->version)); strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info)); - info->regdump_len = cpsw_get_regs_len(ndev); } static u32 cpsw_get_msglevel(struct net_device *ndev) @@ -1935,11 +1940,12 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, slave->port_vlan = data->dual_emac_res_vlan; } -static int cpsw_probe_dt(struct cpsw_platform_data *data, +static int cpsw_probe_dt(struct cpsw_priv *priv, struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct device_node *slave_node; + struct cpsw_platform_data *data = &priv->data; int i = 0, ret; u32 prop; @@ -2030,6 +2036,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (strcmp(slave_node->name, "slave")) continue; + priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); parp = of_get_property(slave_node, "phy_id", &lenp); if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); @@ -2045,7 +2052,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), PHY_ID_FMT, mdio->name, phyid); - slave_data->phy_if = of_get_phy_mode(slave_node); if (slave_data->phy_if < 0) { dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", @@ -2246,7 +2252,7 @@ static int cpsw_probe(struct platform_device *pdev) /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); - if (cpsw_probe_dt(&priv->data, pdev)) { + if (cpsw_probe_dt(priv, pdev)) { 
dev_err(&pdev->dev, "cpsw: platform data missing\n"); ret = -ENODEV; goto clean_runtime_disable_ret; @@ -2584,17 +2590,7 @@ static struct platform_driver cpsw_driver = { .remove = cpsw_remove, }; -static int __init cpsw_init(void) -{ - return platform_driver_register(&cpsw_driver); -} -late_initcall(cpsw_init); - -static void __exit cpsw_exit(void) -{ - platform_driver_unregister(&cpsw_driver); -} -module_exit(cpsw_exit); +module_platform_driver(cpsw_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>"); diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 691ec936e88d..a274cd49afe9 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -791,7 +791,6 @@ static void tlan_get_drvinfo(struct net_device *dev, sizeof(info->bus_info)); else strlcpy(info->bus_info, "EISA", sizeof(info->bus_info)); - info->eedump_len = TLAN_EEPROM_SIZE; } static int tlan_get_eeprom_len(struct net_device *dev) diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index a83263743665..2b7550c43f78 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2134,10 +2134,11 @@ static int rhine_rx(struct net_device *dev, int limit) } skb_put(skb, pkt_len); - skb->protocol = eth_type_trans(skb, dev); rhine_rx_vlan_tag(skb, desc, data_size); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); u64_stats_update_begin(&rp->rx_stats.syncp); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index d95f9aae95e7..4684644703cc 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1135,7 +1135,6 @@ static void axienet_ethtools_get_drvinfo(struct net_device *ndev, { strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); - ed->regdump_len = sizeof(u32) * AXIENET_REGS_N; } /** diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c index 0119dd199276..9c218e140c41 100644 --- a/drivers/net/fjes/fjes_ethtool.c +++ b/drivers/net/fjes/fjes_ethtool.c @@ -105,8 +105,6 @@ static void fjes_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version)); snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info), "platform:%s", plat_dev->name); - drvinfo->regdump_len = 0; - drvinfo->eedump_len = 0; } static int fjes_get_settings(struct net_device *netdev, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 8f5c02eed47d..445071c163cb 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -594,14 +594,12 @@ static struct rtable *geneve_get_rt(struct sk_buff *skb, rt = ip_route_output_key(geneve->net, fl4); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr); - dev->stats.tx_carrier_errors++; - return rt; + return ERR_PTR(-ENETUNREACH); } if (rt->dst.dev == dev) { /* is this necessary? 
*/ netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr); - dev->stats.collisions++; ip_rt_put(rt); - return ERR_PTR(-EINVAL); + return ERR_PTR(-ELOOP); } return rt; } @@ -627,12 +625,12 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) struct ip_tunnel_info *info = NULL; struct rtable *rt = NULL; const struct iphdr *iip; /* interior IP header */ + int err = -EINVAL; struct flowi4 fl4; __u8 tos, ttl; __be16 sport; bool udp_csum; __be16 df; - int err; if (geneve->collect_md) { info = skb_tunnel_info(skb); @@ -647,7 +645,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) rt = geneve_get_rt(skb, dev, &fl4, info); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr); - dev->stats.tx_carrier_errors++; + err = PTR_ERR(rt); goto tx_error; } @@ -699,10 +697,37 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) tx_error: dev_kfree_skb(skb); err: - dev->stats.tx_errors++; + if (err == -ELOOP) + dev->stats.collisions++; + else if (err == -ENETUNREACH) + dev->stats.tx_carrier_errors++; + else + dev->stats.tx_errors++; return NETDEV_TX_OK; } +static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) +{ + struct ip_tunnel_info *info = skb_tunnel_info(skb); + struct geneve_dev *geneve = netdev_priv(dev); + struct rtable *rt; + struct flowi4 fl4; + + if (ip_tunnel_info_af(info) != AF_INET) + return -EINVAL; + + rt = geneve_get_rt(skb, dev, &fl4, info); + if (IS_ERR(rt)) + return PTR_ERR(rt); + + ip_rt_put(rt); + info->key.u.ipv4.src = fl4.saddr; + info->key.tp_src = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); + info->key.tp_dst = geneve->dst_port; + return 0; +} + static const struct net_device_ops geneve_netdev_ops = { .ndo_init = geneve_init, .ndo_uninit = geneve_uninit, @@ -713,6 +738,7 @@ static const struct net_device_ops geneve_netdev_ops = { .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, + .ndo_fill_metadata_dst = geneve_fill_metadata_dst, }; static void geneve_get_drvinfo(struct net_device *dev, @@ -870,14 +896,14 @@ static int geneve_newlink(struct net *net, struct net_device *dev, __be16 dst_port = htons(GENEVE_UDP_PORT); __u8 ttl = 0, tos = 0; bool metadata = false; - __be32 rem_addr; - __u32 vni; + __be32 rem_addr = 0; + __u32 vni = 0; - if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE]) - return -EINVAL; + if (data[IFLA_GENEVE_ID]) + vni = nla_get_u32(data[IFLA_GENEVE_ID]); - vni = nla_get_u32(data[IFLA_GENEVE_ID]); - rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); + if (data[IFLA_GENEVE_REMOTE]) + rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); if (data[IFLA_GENEVE_TTL]) ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 24f8dbcf854f..d50887e3df6d 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -348,7 +348,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb) struct rtable *rt; int err, ret = NET_XMIT_DROP; struct flowi4 fl4 = { - .flowi4_oif = dev_get_iflink(dev), + .flowi4_oif = dev->ifindex, .flowi4_tos = RT_TOS(ip4h->tos), .flowi4_flags = FLOWI_FLAG_ANYSRC, .daddr = ip4h->daddr, @@ -386,7 +386,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) struct dst_entry *dst; int err, ret = NET_XMIT_DROP; struct flowi6 fl6 = { - .flowi6_iif = skb->dev->ifindex, + .flowi6_iif = dev->ifindex, .daddr = ip6h->daddr, .saddr = ip6h->saddr, 
.flowi6_flags = FLOWI_FLAG_ANYSRC, diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 248478c6f6e4..197c93937c2d 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -137,7 +137,7 @@ static const struct proto_ops macvtap_socket_ops; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ NETIF_F_TSO6 | NETIF_F_UFO) #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) -#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) +#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST) static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev) { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 9d097ae54fb2..60994a83a0d6 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -141,6 +141,11 @@ config MICREL_PHY ---help--- Supports the KSZ9021, VSC8201, KS8001 PHYs. +config DP83848_PHY + tristate "Driver for Texas Instruments DP83848 PHY" + ---help--- + Supports the DP83848 PHY. + config DP83867_PHY tristate "Drivers for Texas Instruments DP83867 Gigabit PHY" ---help--- @@ -187,8 +192,6 @@ config MDIO_OCTEON busses. It is required by the Octeon and ThunderX ethernet device drivers. - If in doubt, say Y. - config MDIO_SUN4I tristate "Allwinner sun4i MDIO interface support" depends on ARCH_SUNXI diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 7655d47ad8d8..f31a4e25cf15 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_DP83640_PHY) += dp83640.o +obj-$(CONFIG_DP83848_PHY) += dp83848.o obj-$(CONFIG_DP83867_PHY) += dp83867.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_MICREL_PHY) += micrel.o diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c index d6111affbcb6..f1936b7a7af6 100644 --- a/drivers/net/phy/aquantia.c +++ b/drivers/net/phy/aquantia.c @@ -171,20 +171,7 @@ static struct phy_driver aquantia_driver[] = { }, }; -static int __init aquantia_init(void) -{ - return phy_drivers_register(aquantia_driver, - ARRAY_SIZE(aquantia_driver)); -} - -static void __exit aquantia_exit(void) -{ - return phy_drivers_unregister(aquantia_driver, - ARRAY_SIZE(aquantia_driver)); -} - -module_init(aquantia_init); -module_exit(aquantia_exit); +module_phy_driver(aquantia_driver); static struct mdio_device_id __maybe_unused aquantia_tbl[] = { { PHY_ID_AQ1202, 0xfffffff0 }, diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index dd79ea6ba023..ddb377e53633 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -15,6 +15,7 @@ #include <linux/brcmphy.h> #include <linux/export.h> #include <linux/mdio.h> +#include <linux/module.h> #include <linux/phy.h> #define MII_BCM_CHANNEL_WIDTH 0x2000 @@ -206,3 +207,7 @@ int bcm_phy_enable_eee(struct phy_device *phydev) return 0; } EXPORT_SYMBOL_GPL(bcm_phy_enable_eee); + +MODULE_DESCRIPTION("Broadcom PHY Library"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Broadcom Corporation"); diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c new file mode 100644 index 000000000000..5ce9bef54468 --- /dev/null +++ b/drivers/net/phy/dp83848.c @@ -0,0 +1,99 @@ +/* + * Driver for the Texas Instruments DP83848 PHY + * + * Copyright (C) 2015 Texas Instruments Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/phy.h> + +#define DP83848_PHY_ID 0x20005c90 + +/* Registers */ +#define DP83848_MICR 0x11 +#define DP83848_MISR 0x12 + +/* MICR Register Fields */ +#define DP83848_MICR_INT_OE BIT(0) /* Interrupt Output Enable */ +#define DP83848_MICR_INTEN BIT(1) /* Interrupt Enable */ + +/* MISR Register Fields */ +#define DP83848_MISR_RHF_INT_EN BIT(0) /* Receive Error Counter */ +#define DP83848_MISR_FHF_INT_EN BIT(1) /* False Carrier Counter */ +#define DP83848_MISR_ANC_INT_EN BIT(2) /* Auto-negotiation complete */ +#define DP83848_MISR_DUP_INT_EN BIT(3) /* Duplex Status */ +#define DP83848_MISR_SPD_INT_EN BIT(4) /* Speed status */ +#define DP83848_MISR_LINK_INT_EN BIT(5) /* Link status */ +#define DP83848_MISR_ED_INT_EN BIT(6) /* Energy detect */ +#define DP83848_MISR_LQM_INT_EN BIT(7) /* Link Quality Monitor */ + +static int dp83848_ack_interrupt(struct phy_device *phydev) +{ + int err = phy_read(phydev, DP83848_MISR); + + return err < 0 ? err : 0; +} + +static int dp83848_config_intr(struct phy_device *phydev) +{ + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = phy_write(phydev, DP83848_MICR, + DP83848_MICR_INT_OE | + DP83848_MICR_INTEN); + if (err < 0) + return err; + + return phy_write(phydev, DP83848_MISR, + DP83848_MISR_ANC_INT_EN | + DP83848_MISR_DUP_INT_EN | + DP83848_MISR_SPD_INT_EN | + DP83848_MISR_LINK_INT_EN); + } + + return phy_write(phydev, DP83848_MICR, 0x0); +} + +static struct mdio_device_id __maybe_unused dp83848_tbl[] = { + { DP83848_PHY_ID, 0xfffffff0 }, + { } +}; +MODULE_DEVICE_TABLE(mdio, dp83848_tbl); + +static struct phy_driver dp83848_driver[] = { + { + .phy_id = DP83848_PHY_ID, + .phy_id_mask = 0xfffffff0, + .name = "TI DP83848", + .features = PHY_BASIC_FEATURES, + .flags = PHY_HAS_INTERRUPT, + + .soft_reset = genphy_soft_reset, + .config_init = genphy_config_init, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + + /* IRQ related */ + .ack_interrupt = dp83848_ack_interrupt, + .config_intr = dp83848_config_intr, + + .driver = { .owner = THIS_MODULE, }, + }, +}; +module_phy_driver(dp83848_driver); + +MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver"); +MODULE_AUTHOR("Andrew F. 
Davis <afd@ti.com"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 3bc9f03349f3..95f51d7267b3 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -25,7 +25,7 @@ #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/gpio.h> -#include <linux/mdio-gpio.h> +#include <linux/platform_data/mdio-gpio.h> #include <linux/of_gpio.h> #include <linux/of_mdio.h> diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 12f44c53cc8e..88cb4592b6fb 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -372,6 +372,33 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) EXPORT_SYMBOL(mdiobus_scan); /** + * mdiobus_read_nested - Nested version of the mdiobus_read function + * @bus: the mii_bus struct + * @addr: the phy address + * @regnum: register number to read + * + * In case of nested MDIO bus access avoid lockdep false positives by + * using mutex_lock_nested(). + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum) +{ + int retval; + + BUG_ON(in_interrupt()); + + mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING); + retval = bus->read(bus, addr, regnum); + mutex_unlock(&bus->mdio_lock); + + return retval; +} +EXPORT_SYMBOL(mdiobus_read_nested); + +/** * mdiobus_read - Convenience function for reading a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address @@ -396,6 +423,34 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) EXPORT_SYMBOL(mdiobus_read); /** + * mdiobus_write_nested - Nested version of the mdiobus_write function + * @bus: the mii_bus struct + * @addr: the phy address + * @regnum: register number to write + * @val: value to write to @regnum + * + * In case of nested MDIO bus access avoid lockdep false positives by + * using mutex_lock_nested(). + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val) +{ + int err; + + BUG_ON(in_interrupt()); + + mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING); + err = bus->write(bus, addr, regnum, val); + mutex_unlock(&bus->mdio_lock); + + return err; +} +EXPORT_SYMBOL(mdiobus_write_nested); + +/** * mdiobus_write - Convenience function for writing a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 499185eaf413..cf6312fafea5 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -514,6 +514,27 @@ static int ksz8873mll_read_status(struct phy_device *phydev) return 0; } +static int ksz9031_read_status(struct phy_device *phydev) +{ + int err; + int regval; + + err = genphy_read_status(phydev); + if (err) + return err; + + /* Make sure the PHY is not broken. Read idle error count, + * and reset the PHY if it is maxed out. 
+ */ + regval = phy_read(phydev, MII_STAT1000); + if ((regval & 0xFF) == 0xFF) { + phy_init_hw(phydev); + phydev->link = 0; + } + + return 0; +} + static int ksz8873mll_config_aneg(struct phy_device *phydev) { return 0; @@ -772,7 +793,7 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz9021_type, .config_init = ksz9031_config_init, .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, + .read_status = ksz9031_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .suspend = genphy_suspend, diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 70b08958763a..dc2da8770918 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -43,16 +43,25 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev) static int smsc_phy_config_init(struct phy_device *phydev) { + int __maybe_unused len; + struct device *dev __maybe_unused = &phydev->dev; + struct device_node *of_node __maybe_unused = dev->of_node; int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); + int enable_energy = 1; if (rc < 0) return rc; - /* Enable energy detect mode for this SMSC Transceivers */ - rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, - rc | MII_LAN83C185_EDPWRDOWN); - if (rc < 0) - return rc; + if (of_find_property(of_node, "smsc,disable-energy-detect", &len)) + enable_energy = 0; + + if (enable_energy) { + /* Enable energy detect mode for this SMSC Transceivers */ + rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, + rc | MII_LAN83C185_EDPWRDOWN); + if (rc < 0) + return rc; + } return smsc_phy_ack_interrupt(phydev); } diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c index 91e1bec6079f..07463fcca212 100644 --- a/drivers/net/phy/teranetics.c +++ b/drivers/net/phy/teranetics.c @@ -112,20 +112,7 @@ static struct phy_driver teranetics_driver[] = { }, }; -static int __init teranetics_init(void) -{ - return phy_drivers_register(teranetics_driver, - ARRAY_SIZE(teranetics_driver)); -} - -static void __exit teranetics_exit(void) -{ - return phy_drivers_unregister(teranetics_driver, - ARRAY_SIZE(teranetics_driver)); -} - -module_init(teranetics_init); -module_exit(teranetics_exit); +module_phy_driver(teranetics_driver); static struct mdio_device_id __maybe_unused teranetics_tbl[] = { { PHY_ID_TN2020, 0xffffffff }, diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 3837ae344f63..5e0b43283bce 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev) if (po->pppoe_dev == dev && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { pppox_unbind_sock(sk); - sk->sk_state = PPPOX_ZOMBIE; sk->sk_state_change(sk); po->pppoe_dev = NULL; dev_put(dev); @@ -590,7 +589,7 @@ static int pppoe_release(struct socket *sock) po = pppox_sk(sk); - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { + if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 3a8a36c8ded1..7f83504dfa69 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -166,6 +166,7 @@ config USB_NET_AX8817X * Aten UC210T * ASIX AX88172 * Billionton Systems, USB2AR + * Billionton Systems, GUSB2AM-1G-B * Buffalo LUA-U2-KTX * Corega FEther USB2-TX * D-Link DUB-E100 diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index a186b0a12d50..bd9acff1eb7b 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c 
@@ -588,7 +588,6 @@ void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) usbnet_get_drvinfo(net, info); strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); - info->eedump_len = AX_EEPROM_LEN; } int asix_set_mac_address(struct net_device *net, void *p) diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 1173a24feda3..5cabefc23494 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -959,6 +959,10 @@ static const struct usb_device_id products [] = { USB_DEVICE (0x08dd, 0x90ff), .driver_info = (unsigned long) &ax8817x_info, }, { + // Billionton Systems, GUSB2AM-1G-B + USB_DEVICE(0x08dd, 0x0114), + .driver_info = (unsigned long) &ax88178_info, +}, { // ATEN UC210T USB_DEVICE (0x0557, 0x2009), .driver_info = (unsigned long) &ax8817x_info, diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 6e9c344c7a20..0b4bdd39106b 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -258,7 +258,6 @@ static void dm9601_get_drvinfo(struct net_device *net, { /* Inherit standard device info */ usbnet_get_drvinfo(net, info); - info->eedump_len = DM_EEPROM_LEN; } static u32 dm9601_get_link(struct net_device *net) diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 82d844a8ebd0..4f345bd4e6e2 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -445,7 +445,6 @@ static int mcs7830_get_regs_len(struct net_device *net) static void mcs7830_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *drvinfo) { usbnet_get_drvinfo(net, drvinfo); - drvinfo->regdump_len = mcs7830_get_regs_len(net); } static void mcs7830_get_regs(struct net_device *net, struct ethtool_regs *regs, void *data) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 4752e69de00e..75ae756e93cf 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -711,6 +711,10 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ + {QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 953de13267df..a50df0d8fb9a 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -470,14 +470,10 @@ static int sr_get_eeprom(struct net_device *net, static void sr_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { - struct usbnet *dev = netdev_priv(net); - struct sr_data *data = (struct sr_data *)&dev->data; - /* Inherit standard device info */ usbnet_get_drvinfo(net, info); strncpy(info->driver, DRIVER_NAME, sizeof(info->driver)); strncpy(info->version, DRIVER_VERSION, sizeof(info->version)); - info->eedump_len = data->eeprom_len; } static u32 sr_get_link(struct net_device *net) diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 
a681569ae0b5..9ba11d737753 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -214,10 +214,6 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS); - drvinfo->testinfo_len = 0; - drvinfo->eedump_len = 0; - drvinfo->regdump_len = vmxnet3_get_regs_len(netdev); } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 191579aeab16..92fa3e1ea65c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -30,6 +30,7 @@ #include <net/arp.h> #include <net/ip.h> #include <net/ip_fib.h> +#include <net/ip6_fib.h> #include <net/ip6_route.h> #include <net/rtnetlink.h> #include <net/route.h> @@ -57,6 +58,7 @@ struct slave_queue { struct net_vrf { struct slave_queue queue; struct rtable *rth; + struct rt6_info *rt6; u32 tb_id; }; @@ -104,12 +106,56 @@ static struct dst_ops vrf_dst_ops = { .default_advmss = vrf_default_advmss, }; +/* neighbor handling is done with actual device; do not want + * to flip skb->dev for those ndisc packets. This really fails + * for multiple next protocols (e.g., NEXTHDR_HOP). But it is + * a start. + */ +#if IS_ENABLED(CONFIG_IPV6) +static bool check_ipv6_frame(const struct sk_buff *skb) +{ + const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data; + size_t hlen = sizeof(*ipv6h); + bool rc = true; + + if (skb->len < hlen) + goto out; + + if (ipv6h->nexthdr == NEXTHDR_ICMP) { + const struct icmp6hdr *icmph; + + if (skb->len < hlen + sizeof(*icmph)) + goto out; + + icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h)); + switch (icmph->icmp6_type) { + case NDISC_ROUTER_SOLICITATION: + case NDISC_ROUTER_ADVERTISEMENT: + case NDISC_NEIGHBOUR_SOLICITATION: + case NDISC_NEIGHBOUR_ADVERTISEMENT: + case NDISC_REDIRECT: + rc = false; + break; + } + } + +out: + return rc; +} +#else +static bool check_ipv6_frame(const struct sk_buff *skb) +{ + return false; +} +#endif + static bool is_ip_rx_frame(struct sk_buff *skb) { switch (skb->protocol) { case htons(ETH_P_IP): - case htons(ETH_P_IPV6): return true; + case htons(ETH_P_IPV6): + return check_ipv6_frame(skb); } return false; } @@ -169,12 +215,53 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev, return stats; } +#if IS_ENABLED(CONFIG_IPV6) +static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, + struct net_device *dev) +{ + const struct ipv6hdr *iph = ipv6_hdr(skb); + struct net *net = dev_net(skb->dev); + struct flowi6 fl6 = { + /* needed to match OIF rule */ + .flowi6_oif = dev->ifindex, + .flowi6_iif = LOOPBACK_IFINDEX, + .daddr = iph->daddr, + .saddr = iph->saddr, + .flowlabel = ip6_flowinfo(iph), + .flowi6_mark = skb->mark, + .flowi6_proto = iph->nexthdr, + .flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF, + }; + int ret = NET_XMIT_DROP; + struct dst_entry *dst; + struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst; + + dst = ip6_route_output(net, NULL, &fl6); + if (dst == dst_null) + goto err; + + skb_dst_drop(skb); + skb_dst_set(skb, dst); + + ret = ip6_local_out(net, skb->sk, skb); + if (unlikely(net_xmit_eval(ret))) + dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + + return ret; +err: + vrf_tx_error(dev, skb); + return NET_XMIT_DROP; +} +#else static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, struct net_device *dev) { vrf_tx_error(dev, skb); return NET_XMIT_DROP; } +#endif static int vrf_send_v4_prep(struct sk_buff *skb, 
struct flowi4 *fl4, struct net_device *vrf_dev) @@ -269,6 +356,157 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +#if IS_ENABLED(CONFIG_IPV6) +static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie) +{ + return dst; +} + +static struct dst_ops vrf_dst_ops6 = { + .family = AF_INET6, + .local_out = ip6_local_out, + .check = vrf_ip6_check, + .mtu = vrf_v4_mtu, + .destroy = vrf_dst_destroy, + .default_advmss = vrf_default_advmss, +}; + +static int init_dst_ops6_kmem_cachep(void) +{ + vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache", + sizeof(struct rt6_info), + 0, + SLAB_HWCACHE_ALIGN, + NULL); + + if (!vrf_dst_ops6.kmem_cachep) + return -ENOMEM; + + return 0; +} + +static void free_dst_ops6_kmem_cachep(void) +{ + kmem_cache_destroy(vrf_dst_ops6.kmem_cachep); +} + +static int vrf_input6(struct sk_buff *skb) +{ + skb->dev->stats.rx_errors++; + kfree_skb(skb); + return 0; +} + +/* modelled after ip6_finish_output2 */ +static int vrf_finish_output6(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + struct net_device *dev = dst->dev; + struct neighbour *neigh; + struct in6_addr *nexthop; + int ret; + + skb->protocol = htons(ETH_P_IPV6); + skb->dev = dev; + + rcu_read_lock_bh(); + nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); + neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); + if (unlikely(!neigh)) + neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); + if (!IS_ERR(neigh)) { + ret = dst_neigh_output(dst, neigh, skb); + rcu_read_unlock_bh(); + return ret; + } + rcu_read_unlock_bh(); + + IP6_INC_STATS(dev_net(dst->dev), + ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); + kfree_skb(skb); + return -EINVAL; +} + +/* modelled after ip6_output */ +static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, + net, sk, skb, NULL, skb_dst(skb)->dev, + vrf_finish_output6, + !(IP6CB(skb)->flags & IP6SKB_REROUTED)); +} + +static void vrf_rt6_destroy(struct net_vrf *vrf) +{ + dst_destroy(&vrf->rt6->dst); + free_percpu(vrf->rt6->rt6i_pcpu); + vrf->rt6 = NULL; +} + +static int vrf_rt6_create(struct net_device *dev) +{ + struct net_vrf *vrf = netdev_priv(dev); + struct dst_entry *dst; + struct rt6_info *rt6; + int cpu; + int rc = -ENOMEM; + + rt6 = dst_alloc(&vrf_dst_ops6, dev, 0, + DST_OBSOLETE_NONE, + (DST_HOST | DST_NOPOLICY | DST_NOXFRM)); + if (!rt6) + goto out; + + dst = &rt6->dst; + + rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL); + if (!rt6->rt6i_pcpu) { + dst_destroy(dst); + goto out; + } + for_each_possible_cpu(cpu) { + struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu); + *p = NULL; + } + + memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst)); + + INIT_LIST_HEAD(&rt6->rt6i_siblings); + INIT_LIST_HEAD(&rt6->rt6i_uncached); + + rt6->dst.input = vrf_input6; + rt6->dst.output = vrf_output6; + + rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id); + + atomic_set(&rt6->dst.__refcnt, 2); + + vrf->rt6 = rt6; + rc = 0; +out: + return rc; +} +#else +static int init_dst_ops6_kmem_cachep(void) +{ + return 0; +} + +static void free_dst_ops6_kmem_cachep(void) +{ +} + +static void vrf_rt6_destroy(struct net_vrf *vrf) +{ +} + +static int vrf_rt6_create(struct net_device *dev) +{ + return 0; +} +#endif + /* modelled after ip_finish_output2 */ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) { @@ -490,6 +728,7 @@ static void 
vrf_dev_uninit(struct net_device *dev) struct slave *slave, *next; vrf_rtable_destroy(vrf); + vrf_rt6_destroy(vrf); list_for_each_entry_safe(slave, next, head, list) vrf_del_slave(dev, slave->dev); @@ -513,10 +752,15 @@ static int vrf_dev_init(struct net_device *dev) if (!vrf->rth) goto out_stats; + if (vrf_rt6_create(dev) != 0) + goto out_rth; + dev->flags = IFF_MASTER | IFF_NOARP; return 0; +out_rth: + vrf_rtable_destroy(vrf); out_stats: free_percpu(dev->dstats); dev->dstats = NULL; @@ -586,10 +830,30 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) fl4->flowi4_scope = scope; } +#if IS_ENABLED(CONFIG_IPV6) +static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev, + const struct flowi6 *fl6) +{ + struct rt6_info *rt = NULL; + + if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) { + struct net_vrf *vrf = netdev_priv(dev); + + rt = vrf->rt6; + atomic_inc(&rt->dst.__refcnt); + } + + return (struct dst_entry *)rt; +} +#endif + static const struct l3mdev_ops vrf_l3mdev_ops = { .l3mdev_fib_table = vrf_fib_table, .l3mdev_get_rtable = vrf_get_rtable, .l3mdev_get_saddr = vrf_get_saddr, +#if IS_ENABLED(CONFIG_IPV6) + .l3mdev_get_rt6_dst = vrf_get_rt6_dst, +#endif }; static void vrf_get_drvinfo(struct net_device *dev, @@ -731,6 +995,10 @@ static int __init vrf_init_module(void) if (!vrf_dst_ops.kmem_cachep) return -ENOMEM; + rc = init_dst_ops6_kmem_cachep(); + if (rc != 0) + goto error2; + register_netdevice_notifier(&vrf_notifier_block); rc = rtnl_link_register(&vrf_link_ops); @@ -741,6 +1009,8 @@ static int __init vrf_init_module(void) error: unregister_netdevice_notifier(&vrf_notifier_block); + free_dst_ops6_kmem_cachep(); +error2: kmem_cache_destroy(vrf_dst_ops.kmem_cachep); return rc; } @@ -750,6 +1020,7 @@ static void __exit vrf_cleanup_module(void) rtnl_link_unregister(&vrf_link_ops); unregister_netdevice_notifier(&vrf_notifier_block); kmem_cache_destroy(vrf_dst_ops.kmem_cachep); + free_dst_ops6_kmem_cachep(); } module_init(vrf_init_module); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ce704df7681b..6369a5734d4c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2360,6 +2360,46 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu) return 0; } +static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, + struct ip_tunnel_info *info, + __be16 sport, __be16 dport) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct rtable *rt; + struct flowi4 fl4; + + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_tos = RT_TOS(info->key.tos); + fl4.flowi4_mark = skb->mark; + fl4.flowi4_proto = IPPROTO_UDP; + fl4.daddr = info->key.u.ipv4.dst; + + rt = ip_route_output_key(vxlan->net, &fl4); + if (IS_ERR(rt)) + return PTR_ERR(rt); + ip_rt_put(rt); + + info->key.u.ipv4.src = fl4.saddr; + info->key.tp_src = sport; + info->key.tp_dst = dport; + return 0; +} + +static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct ip_tunnel_info *info = skb_tunnel_info(skb); + __be16 sport, dport; + + sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, + vxlan->cfg.port_max, true); + dport = info->key.tp_dst ? 
: vxlan->cfg.dst_port; + + if (ip_tunnel_info_af(info) == AF_INET) + return egress_ipv4_tun_info(dev, skb, info, sport, dport); + return -EINVAL; +} + static const struct net_device_ops vxlan_netdev_ops = { .ndo_init = vxlan_init, .ndo_uninit = vxlan_uninit, @@ -2374,6 +2414,7 @@ static const struct net_device_ops vxlan_netdev_ops = { .ndo_fdb_add = vxlan_fdb_add, .ndo_fdb_del = vxlan_fdb_delete, .ndo_fdb_dump = vxlan_fdb_dump, + .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, }; /* Info for udev, that this is a virtual tunnel endpoint */ @@ -2794,11 +2835,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct vxlan_config conf; int err; - if (!data[IFLA_VXLAN_ID]) - return -EINVAL; - memset(&conf, 0, sizeof(conf)); - conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); + + if (data[IFLA_VXLAN_ID]) + conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); if (data[IFLA_VXLAN_GROUP]) { conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index a63ab2e83105..f9f94229bf1b 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -214,8 +214,6 @@ config USB_NET_RNDIS_WLAN If you choose to build a module, it'll be called rndis_wlan. -source "drivers/net/wireless/rtl818x/Kconfig" - config ADM8211 tristate "ADMtek ADM8211 support" depends on MAC80211 && PCI @@ -243,6 +241,8 @@ config ADM8211 Thanks to Infineon-ADMtek for their support of this driver. +source "drivers/net/wireless/realtek/rtl818x/Kconfig" + config MAC80211_HWSIM tristate "Simulated radio testing tool for mac80211" depends on MAC80211 @@ -278,7 +278,8 @@ source "drivers/net/wireless/orinoco/Kconfig" source "drivers/net/wireless/p54/Kconfig" source "drivers/net/wireless/rt2x00/Kconfig" source "drivers/net/wireless/mediatek/Kconfig" -source "drivers/net/wireless/rtlwifi/Kconfig" +source "drivers/net/wireless/realtek/rtlwifi/Kconfig" +source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig" source "drivers/net/wireless/ti/Kconfig" source "drivers/net/wireless/zd1211rw/Kconfig" source "drivers/net/wireless/mwifiex/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 6b9e729dd8ac..740fdd353c5d 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -22,9 +22,7 @@ obj-$(CONFIG_HOSTAP) += hostap/ obj-$(CONFIG_B43) += b43/ obj-$(CONFIG_B43LEGACY) += b43legacy/ obj-$(CONFIG_ZD1211RW) += zd1211rw/ -obj-$(CONFIG_RTL8180) += rtl818x/ -obj-$(CONFIG_RTL8187) += rtl818x/ -obj-$(CONFIG_RTLWIFI) += rtlwifi/ +obj-$(CONFIG_WLAN) += realtek/ # 16-bit wireless PCMCIA client drivers obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index df7c7616533b..7d3231acfb24 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -82,6 +82,16 @@ enum bmi_cmd_id { #define BMI_NVRAM_SEG_NAME_SZ 16 +#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10 + +#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00 +#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10 + +#define ATH10K_BMI_CHIP_ID_FROM_OTP_MASK 0x18000 +#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB 15 + +#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff + struct bmi_cmd { __le32 id; /* enum bmi_cmd_id */ union { diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index cf28fbebaedc..84220c376308 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -413,7 
+413,7 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) lockdep_assert_held(&ar_pci->ce_lock); if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) - return -EIO; + return -ENOSPC; desc->addr = __cpu_to_le32(paddr); desc->nbytes = 0; @@ -1076,9 +1076,7 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) } int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, - const struct ce_attr *attr, - void (*send_cb)(struct ath10k_ce_pipe *), - void (*recv_cb)(struct ath10k_ce_pipe *)) + const struct ce_attr *attr) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; @@ -1104,10 +1102,10 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, ce_state->src_sz_max = attr->src_sz_max; if (attr->src_nentries) - ce_state->send_cb = send_cb; + ce_state->send_cb = attr->send_cb; if (attr->dest_nentries) - ce_state->recv_cb = recv_cb; + ce_state->recv_cb = attr->recv_cb; if (attr->src_nentries) { ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 5c903e15dd65..dbb94fdb274b 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -209,9 +209,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr); void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, - const struct ce_attr *attr, - void (*send_cb)(struct ath10k_ce_pipe *), - void (*recv_cb)(struct ath10k_ce_pipe *)); + const struct ce_attr *attr); void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id); /*==================CE Engine Shutdown=======================*/ @@ -277,6 +275,9 @@ struct ce_attr { /* #entries in destination ring - Must be a power of 2 */ unsigned int dest_nentries; + + void (*send_cb)(struct ath10k_ce_pipe *); + void (*recv_cb)(struct ath10k_ce_pipe *); }; #define SR_BA_ADDRESS 0x0000 diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 879625adc63a..13de3617d5ab 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -448,6 +448,56 @@ out: return ret; } +static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) +{ + u32 result, address; + u8 board_id, chip_id; + int ret; + + address = ar->hw_params.patch_load_addr; + + if (!ar->otp_data || !ar->otp_len) { + ath10k_warn(ar, + "failed to retrieve board id because of invalid otp\n"); + return -ENODATA; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot upload otp to 0x%x len %zd for board id\n", + address, ar->otp_len); + + ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); + if (ret) { + ath10k_err(ar, "could not write otp for board id check: %d\n", + ret); + return ret; + } + + ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID, + &result); + if (ret) { + ath10k_err(ar, "could not execute otp for board id check: %d\n", + ret); + return ret; + } + + board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP); + chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot get otp board id result 0x%08x board_id %d chip_id %d\n", + result, board_id, chip_id); + + if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0) + return -EOPNOTSUPP; + + ar->id.bmi_ids_valid = true; + ar->id.bmi_board_id = board_id; + ar->id.bmi_chip_id = chip_id; + + return 0; +} + static int 
ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; @@ -486,8 +536,8 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, - ar->fw_features)) - && result != 0) { + ar->fw_features)) && + result != 0) { ath10k_err(ar, "otp calibration failed: %d", result); return -EINVAL; } @@ -510,7 +560,7 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) data_len = ar->firmware_len; mode_name = "normal"; ret = ath10k_swap_code_seg_configure(ar, - ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW); + ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW); if (ret) { ath10k_err(ar, "failed to configure fw code swap: %d\n", ret); @@ -541,11 +591,18 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) return ret; } -static void ath10k_core_free_firmware_files(struct ath10k *ar) +static void ath10k_core_free_board_files(struct ath10k *ar) { if (!IS_ERR(ar->board)) release_firmware(ar->board); + ar->board = NULL; + ar->board_data = NULL; + ar->board_len = 0; +} + +static void ath10k_core_free_firmware_files(struct ath10k *ar) +{ if (!IS_ERR(ar->otp)) release_firmware(ar->otp); @@ -557,10 +614,6 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) ath10k_swap_code_seg_release(ar); - ar->board = NULL; - ar->board_data = NULL; - ar->board_len = 0; - ar->otp = NULL; ar->otp_data = NULL; ar->otp_len = 0; @@ -570,7 +623,6 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) ar->firmware_len = 0; ar->cal_file = NULL; - } static int ath10k_fetch_cal_file(struct ath10k *ar) @@ -592,68 +644,251 @@ static int ath10k_fetch_cal_file(struct ath10k *ar) return 0; } -static int ath10k_core_fetch_spec_board_file(struct ath10k *ar) +static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar) { - char filename[100]; - - scnprintf(filename, sizeof(filename), "board-%s-%s.bin", - ath10k_bus_str(ar->hif.bus), ar->spec_board_id); + if (!ar->hw_params.fw.board) { + ath10k_err(ar, "failed to find board file fw entry\n"); + return -EINVAL; + } - ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename); + ar->board = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + ar->hw_params.fw.board); if (IS_ERR(ar->board)) return PTR_ERR(ar->board); ar->board_data = ar->board->data; ar->board_len = ar->board->size; - ar->spec_board_loaded = true; return 0; } -static int ath10k_core_fetch_generic_board_file(struct ath10k *ar) +static int ath10k_core_parse_bd_ie_board(struct ath10k *ar, + const void *buf, size_t buf_len, + const char *boardname) { - if (!ar->hw_params.fw.board) { - ath10k_err(ar, "failed to find board file fw entry\n"); - return -EINVAL; + const struct ath10k_fw_ie *hdr; + bool name_match_found; + int ret, board_ie_id; + size_t board_ie_len; + const void *board_ie_data; + + name_match_found = false; + + /* go through ATH10K_BD_IE_BOARD_ elements */ + while (buf_len > sizeof(struct ath10k_fw_ie)) { + hdr = buf; + board_ie_id = le32_to_cpu(hdr->id); + board_ie_len = le32_to_cpu(hdr->len); + board_ie_data = hdr->data; + + buf_len -= sizeof(*hdr); + buf += sizeof(*hdr); + + if (buf_len < ALIGN(board_ie_len, 4)) { + ath10k_err(ar, "invalid ATH10K_BD_IE_BOARD length: %zu < %zu\n", + buf_len, ALIGN(board_ie_len, 4)); + ret = -EINVAL; + goto out; + } + + switch (board_ie_id) { + case ATH10K_BD_IE_BOARD_NAME: + ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "board name", "", + 
board_ie_data, board_ie_len); + + if (board_ie_len != strlen(boardname)) + break; + + ret = memcmp(board_ie_data, boardname, strlen(boardname)); + if (ret) + break; + + name_match_found = true; + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot found match for name '%s'", + boardname); + break; + case ATH10K_BD_IE_BOARD_DATA: + if (!name_match_found) + /* no match found */ + break; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot found board data for '%s'", + boardname); + + ar->board_data = board_ie_data; + ar->board_len = board_ie_len; + + ret = 0; + goto out; + default: + ath10k_warn(ar, "unknown ATH10K_BD_IE_BOARD found: %d\n", + board_ie_id); + break; + } + + /* jump over the padding */ + board_ie_len = ALIGN(board_ie_len, 4); + + buf_len -= board_ie_len; + buf += board_ie_len; } - ar->board = ath10k_fetch_fw_file(ar, - ar->hw_params.fw.dir, - ar->hw_params.fw.board); + /* no match found */ + ret = -ENOENT; + +out: + return ret; +} + +static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, + const char *boardname, + const char *filename) +{ + size_t len, magic_len, ie_len; + struct ath10k_fw_ie *hdr; + const u8 *data; + int ret, ie_id; + + ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename); if (IS_ERR(ar->board)) return PTR_ERR(ar->board); - ar->board_data = ar->board->data; - ar->board_len = ar->board->size; - ar->spec_board_loaded = false; + data = ar->board->data; + len = ar->board->size; + + /* magic has extra null byte padded */ + magic_len = strlen(ATH10K_BOARD_MAGIC) + 1; + if (len < magic_len) { + ath10k_err(ar, "failed to find magic value in %s/%s, file too short: %zu\n", + ar->hw_params.fw.dir, filename, len); + ret = -EINVAL; + goto err; + } + + if (memcmp(data, ATH10K_BOARD_MAGIC, magic_len)) { + ath10k_err(ar, "found invalid board magic\n"); + ret = -EINVAL; + goto err; + } + + /* magic is padded to 4 bytes */ + magic_len = ALIGN(magic_len, 4); + if (len < magic_len) { + ath10k_err(ar, "failed: %s/%s too small to contain board data, len: %zu\n", + ar->hw_params.fw.dir, filename, len); + ret = -EINVAL; + goto err; + } + + data += magic_len; + len -= magic_len; + + while (len > sizeof(struct ath10k_fw_ie)) { + hdr = (struct ath10k_fw_ie *)data; + ie_id = le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data = hdr->data; + + if (len < ALIGN(ie_len, 4)) { + ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n", + ie_id, ie_len, len); + ret = -EINVAL; + goto err; + } + + switch (ie_id) { + case ATH10K_BD_IE_BOARD: + ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, + boardname); + if (ret == -ENOENT) + /* no match found, continue */ + break; + else if (ret) + /* there was an error, bail out */ + goto err; + + /* board data found */ + goto out; + } + + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + + len -= ie_len; + data += ie_len; + } + +out: + if (!ar->board_data || !ar->board_len) { + ath10k_err(ar, + "failed to fetch board data for %s from %s/%s\n", + ar->hw_params.fw.dir, boardname, filename); + ret = -ENODATA; + goto err; + } + + return 0; + +err: + ath10k_core_free_board_files(ar); + return ret; +} + +static int ath10k_core_create_board_name(struct ath10k *ar, char *name, + size_t name_len) +{ + if (ar->id.bmi_ids_valid) { + scnprintf(name, name_len, + "bus=%s,bmi-chip-id=%d,bmi-board-id=%d", + ath10k_bus_str(ar->hif.bus), + ar->id.bmi_chip_id, + ar->id.bmi_board_id); + goto out; + } + + scnprintf(name, name_len, + 
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x", + ath10k_bus_str(ar->hif.bus), + ar->id.vendor, ar->id.device, + ar->id.subsystem_vendor, ar->id.subsystem_device); + +out: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name); return 0; } static int ath10k_core_fetch_board_file(struct ath10k *ar) { + char boardname[100]; int ret; - if (strlen(ar->spec_board_id) > 0) { - ret = ath10k_core_fetch_spec_board_file(ar); - if (ret) { - ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n", - ret); - goto generic; - } - - ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n", - ar->spec_board_id); - return 0; + ret = ath10k_core_create_board_name(ar, boardname, sizeof(boardname)); + if (ret) { + ath10k_err(ar, "failed to create board name: %d", ret); + return ret; } -generic: - ret = ath10k_core_fetch_generic_board_file(ar); + ar->bd_api = 2; + ret = ath10k_core_fetch_board_data_api_n(ar, boardname, + ATH10K_BOARD_API2_FILE); + if (!ret) + goto success; + + ar->bd_api = 1; + ret = ath10k_core_fetch_board_data_api_1(ar); if (ret) { - ath10k_err(ar, "failed to fetch generic board data: %d\n", ret); + ath10k_err(ar, "failed to fetch board data\n"); return ret; } +success: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api); return 0; } @@ -885,12 +1120,6 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) /* calibration file is optional, don't check for any errors */ ath10k_fetch_cal_file(ar); - ret = ath10k_core_fetch_board_file(ar); - if (ret) { - ath10k_err(ar, "failed to fetch board file: %d\n", ret); - return ret; - } - ar->fw_api = 5; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); @@ -1263,10 +1492,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) goto err; /* Some of of qca988x solutions are having global reset issue - * during target initialization. Bypassing PLL setting before - * downloading firmware and letting the SoC run on REF_CLK is - * fixing the problem. Corresponding firmware change is also needed - * to set the clock source once the target is initialized. + * during target initialization. Bypassing PLL setting before + * downloading firmware and letting the SoC run on REF_CLK is + * fixing the problem. Corresponding firmware change is also needed + * to set the clock source once the target is initialized. 
*/ if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT, ar->fw_features)) { @@ -1500,6 +1729,19 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_power_down; } + ret = ath10k_core_get_board_id_from_otp(ar); + if (ret && ret != -EOPNOTSUPP) { + ath10k_err(ar, "failed to get board id from otp for qca99x0: %d\n", + ret); + return ret; + } + + ret = ath10k_core_fetch_board_file(ar); + if (ret) { + ath10k_err(ar, "failed to fetch board file: %d\n", ret); + goto err_free_firmware_files; + } + ret = ath10k_core_init_firmware_features(ar); if (ret) { ath10k_err(ar, "fatal problem with firmware features: %d\n", @@ -1627,6 +1869,7 @@ void ath10k_core_unregister(struct ath10k *ar) ath10k_testmode_destroy(ar); ath10k_core_free_firmware_files(ar); + ath10k_core_free_board_files(ar); ath10k_debug_unregister(ar); } diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 04e040a06cb1..7cc7cdd56c95 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -250,6 +250,30 @@ struct ath10k_fw_stats { struct list_head peers; }; +#define ATH10K_TPC_TABLE_TYPE_FLAG 1 +#define ATH10K_TPC_PREAM_TABLE_END 0xFFFF + +struct ath10k_tpc_table { + u32 pream_idx[WMI_TPC_RATE_MAX]; + u8 rate_code[WMI_TPC_RATE_MAX]; + char tpc_value[WMI_TPC_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE]; +}; + +struct ath10k_tpc_stats { + u32 reg_domain; + u32 chan_freq; + u32 phy_mode; + u32 twice_antenna_reduction; + u32 twice_max_rd_power; + s32 twice_antenna_gain; + u32 power_limit; + u32 num_tx_chain; + u32 ctl; + u32 rate_max; + u8 flag[WMI_TPC_FLAG]; + struct ath10k_tpc_table tpc_table[WMI_TPC_FLAG]; +}; + struct ath10k_dfs_stats { u32 phy_errors; u32 pulses_total; @@ -378,6 +402,11 @@ struct ath10k_debug { struct ath10k_dfs_stats dfs_stats; struct ath_dfs_pool_stats dfs_pool_stats; + /* used for tpc-dump storage, protected by data-lock */ + struct ath10k_tpc_stats *tpc_stats; + + struct completion tpc_complete; + /* protected by conf_mutex */ u32 fw_dbglog_mask; u32 fw_dbglog_level; @@ -647,10 +676,19 @@ struct ath10k { struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info; } swap; - char spec_board_id[100]; - bool spec_board_loaded; + struct { + u32 vendor; + u32 device; + u32 subsystem_vendor; + u32 subsystem_device; + + bool bmi_ids_valid; + u8 bmi_board_id; + u8 bmi_chip_id; + } id; int fw_api; + int bd_api; enum ath10k_cal_mode cal_mode; struct { diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index bf033f46f8aa..6cc1aa3449c8 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -125,19 +125,25 @@ EXPORT_SYMBOL(ath10k_info); void ath10k_print_driver_info(struct ath10k *ar) { char fw_features[128] = {}; + char boardinfo[100]; ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); - ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n", + if (ar->id.bmi_ids_valid) + scnprintf(boardinfo, sizeof(boardinfo), "bmi %d:%d", + ar->id.bmi_chip_id, ar->id.bmi_board_id); + else + scnprintf(boardinfo, sizeof(boardinfo), "sub %04x:%04x", + ar->id.subsystem_vendor, ar->id.subsystem_device); + + ath10k_info(ar, "%s (0x%08x, 0x%08x %s) fw %s fwapi %d bdapi %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n", ar->hw_params.name, ar->target_version, ar->chip_id, - (strlen(ar->spec_board_id) > 0 ? 
", " : ""), - ar->spec_board_id, - (strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded - ? " fallback" : ""), + boardinfo, ar->hw->wiphy->fw_version, ar->fw_api, + ar->bd_api, ar->htt.target_version_major, ar->htt.target_version_minor, ar->wmi.op_version, @@ -285,28 +291,6 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar) spin_unlock_bh(&ar->data_lock); } -static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head) -{ - struct ath10k_fw_stats_peer *i; - size_t num = 0; - - list_for_each_entry(i, head, list) - ++num; - - return num; -} - -static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head) -{ - struct ath10k_fw_stats_vdev *i; - size_t num = 0; - - list_for_each_entry(i, head, list) - ++num; - - return num; -} - void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_fw_stats stats = {}; @@ -343,8 +327,8 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) goto free; } - num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers); - num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs); + num_peers = ath10k_wmi_fw_stats_num_peers(&ar->debug.fw_stats.peers); + num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs); is_start = (list_empty(&ar->debug.fw_stats.pdevs) && !list_empty(&stats.pdevs)); is_end = (!list_empty(&ar->debug.fw_stats.pdevs) && @@ -429,240 +413,6 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar) return 0; } -/* FIXME: How to calculate the buffer size sanely? */ -#define ATH10K_FW_STATS_BUF_SIZE (1024*1024) - -static void ath10k_fw_stats_fill(struct ath10k *ar, - struct ath10k_fw_stats *fw_stats, - char *buf) -{ - unsigned int len = 0; - unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE; - const struct ath10k_fw_stats_pdev *pdev; - const struct ath10k_fw_stats_vdev *vdev; - const struct ath10k_fw_stats_peer *peer; - size_t num_peers; - size_t num_vdevs; - int i; - - spin_lock_bh(&ar->data_lock); - - pdev = list_first_entry_or_null(&fw_stats->pdevs, - struct ath10k_fw_stats_pdev, list); - if (!pdev) { - ath10k_warn(ar, "failed to get pdev stats\n"); - goto unlock; - } - - num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers); - num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs); - - len += scnprintf(buf + len, buf_len - len, "\n"); - len += scnprintf(buf + len, buf_len - len, "%30s\n", - "ath10k PDEV stats"); - len += scnprintf(buf + len, buf_len - len, "%30s\n\n", - "================="); - - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Channel noise floor", pdev->ch_noise_floor); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "Channel TX power", pdev->chan_tx_power); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "TX frame count", pdev->tx_frame_count); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RX frame count", pdev->rx_frame_count); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RX clear count", pdev->rx_clear_count); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "Cycle count", pdev->cycle_count); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "PHY error count", pdev->phy_err_count); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RTS bad count", pdev->rts_bad); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RTS good count", pdev->rts_good); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "FCS bad count", pdev->fcs_bad); - len += scnprintf(buf + 
len, buf_len - len, "%30s %10u\n", - "No beacon count", pdev->no_beacons); - len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "MIB int count", pdev->mib_int_count); - - len += scnprintf(buf + len, buf_len - len, "\n"); - len += scnprintf(buf + len, buf_len - len, "%30s\n", - "ath10k PDEV TX stats"); - len += scnprintf(buf + len, buf_len - len, "%30s\n\n", - "================="); - - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HTT cookies queued", pdev->comp_queued); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HTT cookies disp.", pdev->comp_delivered); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDU queued", pdev->msdu_enqued); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU queued", pdev->mpdu_enqued); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs dropped", pdev->wmm_drop); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Local enqued", pdev->local_enqued); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Local freed", pdev->local_freed); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HW queued", pdev->hw_queued); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PPDUs reaped", pdev->hw_reaped); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Num underruns", pdev->underrun); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PPDUs cleaned", pdev->tx_abort); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs requed", pdev->mpdus_requed); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Excessive retries", pdev->tx_ko); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HW rate", pdev->data_rc); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Sched self tiggers", pdev->self_triggers); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Dropped due to SW retries", - pdev->sw_retry_failure); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Illegal rate phy errors", - pdev->illgl_rate_phy_err); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Pdev continous xretry", pdev->pdev_cont_xretry); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "TX timeout", pdev->pdev_tx_timeout); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PDEV resets", pdev->pdev_resets); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY underrun", pdev->phy_underrun); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU is more than txop limit", pdev->txop_ovf); - - len += scnprintf(buf + len, buf_len - len, "\n"); - len += scnprintf(buf + len, buf_len - len, "%30s\n", - "ath10k PDEV RX stats"); - len += scnprintf(buf + len, buf_len - len, "%30s\n\n", - "================="); - - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Mid PPDU route change", - pdev->mid_ppdu_route_change); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Tot. 
number of statuses", pdev->status_rcvd); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 0", pdev->r0_frags); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 1", pdev->r1_frags); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 2", pdev->r2_frags); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 3", pdev->r3_frags); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs delivered to HTT", pdev->htt_msdus); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs delivered to HTT", pdev->htt_mpdus); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs delivered to stack", pdev->loc_msdus); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs delivered to stack", pdev->loc_mpdus); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Oversized AMSUs", pdev->oversize_amsdu); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY errors", pdev->phy_errs); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY errors drops", pdev->phy_err_drop); - len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); - - len += scnprintf(buf + len, buf_len - len, "\n"); - len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", - "ath10k VDEV stats", num_vdevs); - len += scnprintf(buf + len, buf_len - len, "%30s\n\n", - "================="); - - list_for_each_entry(vdev, &fw_stats->vdevs, list) { - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "vdev id", vdev->vdev_id); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "beacon snr", vdev->beacon_snr); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "data snr", vdev->data_snr); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "num rx frames", vdev->num_rx_frames); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "num rts fail", vdev->num_rts_fail); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "num rts success", vdev->num_rts_success); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "num rx err", vdev->num_rx_err); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "num rx discard", vdev->num_rx_discard); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "num tx not acked", vdev->num_tx_not_acked); - - for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++) - len += scnprintf(buf + len, buf_len - len, - "%25s [%02d] %u\n", - "num tx frames", i, - vdev->num_tx_frames[i]); - - for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++) - len += scnprintf(buf + len, buf_len - len, - "%25s [%02d] %u\n", - "num tx frames retries", i, - vdev->num_tx_frames_retries[i]); - - for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++) - len += scnprintf(buf + len, buf_len - len, - "%25s [%02d] %u\n", - "num tx frames failures", i, - vdev->num_tx_frames_failures[i]); - - for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++) - len += scnprintf(buf + len, buf_len - len, - "%25s [%02d] 0x%08x\n", - "tx rate history", i, - vdev->tx_rate_history[i]); - - for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++) - len += scnprintf(buf + len, buf_len - len, - "%25s [%02d] %u\n", - "beacon rssi history", i, - vdev->beacon_rssi_history[i]); - - len += scnprintf(buf + len, buf_len - len, "\n"); - } - - len += scnprintf(buf + len, buf_len - len, "\n"); - len += scnprintf(buf + len, 
buf_len - len, "%30s (%zu)\n", - "ath10k PEER stats", num_peers); - len += scnprintf(buf + len, buf_len - len, "%30s\n\n", - "================="); - - list_for_each_entry(peer, &fw_stats->peers, list) { - len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", - "Peer MAC address", peer->peer_macaddr); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer RSSI", peer->peer_rssi); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer TX rate", peer->peer_tx_rate); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer RX rate", peer->peer_rx_rate); - len += scnprintf(buf + len, buf_len - len, "\n"); - } - -unlock: - spin_unlock_bh(&ar->data_lock); - - if (len >= buf_len) - buf[len - 1] = 0; - else - buf[len] = 0; -} - static int ath10k_fw_stats_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; @@ -688,7 +438,12 @@ static int ath10k_fw_stats_open(struct inode *inode, struct file *file) goto err_free; } - ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf); + ret = ath10k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, buf); + if (ret) { + ath10k_warn(ar, "failed to fill fw stats: %d\n", ret); + goto err_free; + } + file->private_data = buf; mutex_unlock(&ar->conf_mutex); @@ -1843,6 +1598,233 @@ static const struct file_operations fops_nf_cal_period = { .llseek = default_llseek, }; +#define ATH10K_TPC_CONFIG_BUF_SIZE (1024 * 1024) + +static int ath10k_debug_tpc_stats_request(struct ath10k *ar) +{ + int ret; + unsigned long time_left; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->debug.tpc_complete); + + ret = ath10k_wmi_pdev_get_tpc_config(ar, WMI_TPC_CONFIG_PARAM); + if (ret) { + ath10k_warn(ar, "failed to request tpc config: %d\n", ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->debug.tpc_complete, + 1 * HZ); + if (time_left == 0) + return -ETIMEDOUT; + + return 0; +} + +void ath10k_debug_tpc_stats_process(struct ath10k *ar, + struct ath10k_tpc_stats *tpc_stats) +{ + spin_lock_bh(&ar->data_lock); + + kfree(ar->debug.tpc_stats); + ar->debug.tpc_stats = tpc_stats; + complete(&ar->debug.tpc_complete); + + spin_unlock_bh(&ar->data_lock); +} + +static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, + unsigned int j, char *buf, unsigned int *len) +{ + unsigned int i, buf_len; + static const char table_str[][5] = { "CDD", + "STBC", + "TXBF" }; + static const char pream_str[][6] = { "CCK", + "OFDM", + "HT20", + "HT40", + "VHT20", + "VHT40", + "VHT80", + "HTCUP" }; + + buf_len = ATH10K_TPC_CONFIG_BUF_SIZE; + *len += scnprintf(buf + *len, buf_len - *len, + "********************************\n"); + *len += scnprintf(buf + *len, buf_len - *len, + "******************* %s POWER TABLE ****************\n", + table_str[j]); + *len += scnprintf(buf + *len, buf_len - *len, + "********************************\n"); + *len += scnprintf(buf + *len, buf_len - *len, + "No. 
Preamble Rate_code tpc_value1 tpc_value2 tpc_value3\n"); + + for (i = 0; i < tpc_stats->rate_max; i++) { + *len += scnprintf(buf + *len, buf_len - *len, + "%8d %s 0x%2x %s\n", i, + pream_str[tpc_stats->tpc_table[j].pream_idx[i]], + tpc_stats->tpc_table[j].rate_code[i], + tpc_stats->tpc_table[j].tpc_value[i]); + } + + *len += scnprintf(buf + *len, buf_len - *len, + "***********************************\n"); +} + +static void ath10k_tpc_stats_fill(struct ath10k *ar, + struct ath10k_tpc_stats *tpc_stats, + char *buf) +{ + unsigned int len, j, buf_len; + + len = 0; + buf_len = ATH10K_TPC_CONFIG_BUF_SIZE; + + spin_lock_bh(&ar->data_lock); + + if (!tpc_stats) { + ath10k_warn(ar, "failed to get tpc stats\n"); + goto unlock; + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, + "*************************************\n"); + len += scnprintf(buf + len, buf_len - len, + "TPC config for channel %4d mode %d\n", + tpc_stats->chan_freq, + tpc_stats->phy_mode); + len += scnprintf(buf + len, buf_len - len, + "*************************************\n"); + len += scnprintf(buf + len, buf_len - len, + "CTL = 0x%2x Reg. Domain = %2d\n", + tpc_stats->ctl, + tpc_stats->reg_domain); + len += scnprintf(buf + len, buf_len - len, + "Antenna Gain = %2d Reg. Max Antenna Gain = %2d\n", + tpc_stats->twice_antenna_gain, + tpc_stats->twice_antenna_reduction); + len += scnprintf(buf + len, buf_len - len, + "Power Limit = %2d Reg. Max Power = %2d\n", + tpc_stats->power_limit, + tpc_stats->twice_max_rd_power / 2); + len += scnprintf(buf + len, buf_len - len, + "Num tx chains = %2d Num supported rates = %2d\n", + tpc_stats->num_tx_chain, + tpc_stats->rate_max); + + for (j = 0; j < tpc_stats->num_tx_chain ; j++) { + switch (j) { + case WMI_TPC_TABLE_TYPE_CDD: + if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { + len += scnprintf(buf + len, buf_len - len, + "CDD not supported\n"); + break; + } + + ath10k_tpc_stats_print(tpc_stats, j, buf, &len); + break; + case WMI_TPC_TABLE_TYPE_STBC: + if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { + len += scnprintf(buf + len, buf_len - len, + "STBC not supported\n"); + break; + } + + ath10k_tpc_stats_print(tpc_stats, j, buf, &len); + break; + case WMI_TPC_TABLE_TYPE_TXBF: + if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { + len += scnprintf(buf + len, buf_len - len, + "TXBF not supported\n***************************\n"); + break; + } + + ath10k_tpc_stats_print(tpc_stats, j, buf, &len); + break; + default: + len += scnprintf(buf + len, buf_len - len, + "Invalid Type\n"); + break; + } + } + +unlock: + spin_unlock_bh(&ar->data_lock); + + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; +} + +static int ath10k_tpc_stats_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + void *buf = NULL; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE); + if (!buf) { + ret = -ENOMEM; + goto err_unlock; + } + + ret = ath10k_debug_tpc_stats_request(ar); + if (ret) { + ath10k_warn(ar, "failed to request tpc config stats: %d\n", + ret); + goto err_free; + } + + ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf); + file->private_data = buf; + + mutex_unlock(&ar->conf_mutex); + return 0; + +err_free: + vfree(buf); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath10k_tpc_stats_release(struct inode *inode, struct file *file) +{ + 
vfree(file->private_data); + + return 0; +} + +static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + unsigned int len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_tpc_stats = { + .open = ath10k_tpc_stats_open, + .release = ath10k_tpc_stats_release, + .read = ath10k_tpc_stats_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + int ath10k_debug_start(struct ath10k *ar) { int ret; @@ -2111,6 +2093,8 @@ void ath10k_debug_destroy(struct ath10k *ar) ar->debug.fw_crash_data = NULL; ath10k_debug_fw_stats_reset(ar); + + kfree(ar->debug.tpc_stats); } int ath10k_debug_register(struct ath10k *ar) @@ -2127,6 +2111,7 @@ int ath10k_debug_register(struct ath10k *ar) INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork, ath10k_debug_htt_stats_dwork); + init_completion(&ar->debug.tpc_complete); init_completion(&ar->debug.fw_stats_complete); debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar, @@ -2195,6 +2180,9 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR, ar->debug.debugfs_phy, ar, &fops_quiet_period); + debugfs_create_file("tpc_stats", S_IRUSR, + ar->debug.debugfs_phy, ar, &fops_tpc_stats); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 53bd6a19eab6..7de780c4ec8d 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -55,6 +55,9 @@ enum ath10k_dbg_aggr_mode { ATH10K_DBG_AGGR_MODE_MAX, }; +/* FIXME: How to calculate the buffer size sanely? */ +#define ATH10K_FW_STATS_BUF_SIZE (1024*1024) + extern unsigned int ath10k_debug_mask; __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...); @@ -70,6 +73,8 @@ void ath10k_debug_destroy(struct ath10k *ar); int ath10k_debug_register(struct ath10k *ar); void ath10k_debug_unregister(struct ath10k *ar); void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb); +void ath10k_debug_tpc_stats_process(struct ath10k *ar, + struct ath10k_tpc_stats *tpc_stats); struct ath10k_fw_crash_data * ath10k_debug_get_new_fw_crash_data(struct ath10k *ar); @@ -117,6 +122,12 @@ static inline void ath10k_debug_fw_stats_process(struct ath10k *ar, { } +static inline void ath10k_debug_tpc_stats_process(struct ath10k *ar, + struct ath10k_tpc_stats *tpc_stats) +{ + kfree(tpc_stats); +} + static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len) { diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 0c92e0251e84..89e7076c919f 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -30,13 +30,6 @@ struct ath10k_hif_sg_item { u16 len; }; -struct ath10k_hif_cb { - int (*tx_completion)(struct ath10k *ar, - struct sk_buff *wbuf); - int (*rx_completion)(struct ath10k *ar, - struct sk_buff *wbuf); -}; - struct ath10k_hif_ops { /* send a scatter-gather list to the target */ int (*tx_sg)(struct ath10k *ar, u8 pipe_id, @@ -65,8 +58,7 @@ struct ath10k_hif_ops { void (*stop)(struct ath10k *ar); int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe, - int *ul_is_polled, int *dl_is_polled); + u8 *ul_pipe, u8 *dl_pipe); void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe); @@ -80,9 +72,6 @@ struct ath10k_hif_ops { */ void 
(*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force); - void (*set_callbacks)(struct ath10k *ar, - struct ath10k_hif_cb *callbacks); - u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id); u32 (*read32)(struct ath10k *ar, u32 address); @@ -142,13 +131,10 @@ static inline void ath10k_hif_stop(struct ath10k *ar) static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe, - int *ul_is_polled, - int *dl_is_polled) + u8 *ul_pipe, u8 *dl_pipe) { return ar->hif.ops->map_service_to_pipe(ar, service_id, - ul_pipe, dl_pipe, - ul_is_polled, dl_is_polled); + ul_pipe, dl_pipe); } static inline void ath10k_hif_get_default_pipe(struct ath10k *ar, @@ -163,12 +149,6 @@ static inline void ath10k_hif_send_complete_check(struct ath10k *ar, ar->hif.ops->send_complete_check(ar, pipe_id, force); } -static inline void ath10k_hif_set_callbacks(struct ath10k *ar, - struct ath10k_hif_cb *callbacks) -{ - ar->hif.ops->set_callbacks(ar, callbacks); -} - static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id) { diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 32d9ff1b19dc..5b3c6bcf9598 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -23,16 +23,6 @@ /* Send */ /********/ -static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep, - int force) -{ - /* - * Check whether HIF has any prior sends that have finished, - * have not had the post-processing done. - */ - ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force); -} - static void ath10k_htc_control_tx_complete(struct ath10k *ar, struct sk_buff *skb) { @@ -181,24 +171,22 @@ err_pull: return ret; } -static int ath10k_htc_tx_completion_handler(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htc *htc = &ar->htc; struct ath10k_skb_cb *skb_cb; struct ath10k_htc_ep *ep; if (WARN_ON_ONCE(!skb)) - return 0; + return; skb_cb = ATH10K_SKB_CB(skb); ep = &htc->endpoint[skb_cb->eid]; ath10k_htc_notify_tx_completion(ep, skb); /* the skb now belongs to the completion handler */ - - return 0; } +EXPORT_SYMBOL(ath10k_htc_tx_completion_handler); /***********/ /* Receive */ @@ -304,8 +292,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, return status; } -static int ath10k_htc_rx_completion_handler(struct ath10k *ar, - struct sk_buff *skb) +void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb) { int status = 0; struct ath10k_htc *htc = &ar->htc; @@ -326,21 +313,11 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid); ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "", hdr, sizeof(*hdr)); - status = -EINVAL; goto out; } ep = &htc->endpoint[eid]; - /* - * If this endpoint that received a message from the target has - * a to-target HIF pipe whose send completions are polled rather - * than interrupt-driven, this is a good point to ask HIF to check - * whether it has any completed sends to handle. 
- */ - if (ep->ul_is_polled) - ath10k_htc_send_complete_check(ep, 1); - payload_len = __le16_to_cpu(hdr->len); if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) { @@ -348,7 +325,6 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, payload_len + sizeof(*hdr)); ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "", hdr, sizeof(*hdr)); - status = -EINVAL; goto out; } @@ -358,7 +334,6 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, skb->len, payload_len); ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "", hdr, sizeof(*hdr)); - status = -EINVAL; goto out; } @@ -374,7 +349,6 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, (trailer_len > payload_len)) { ath10k_warn(ar, "Invalid trailer length: %d\n", trailer_len); - status = -EPROTO; goto out; } @@ -407,7 +381,6 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, * sending unsolicited messages on the ep 0 */ ath10k_warn(ar, "HTC rx ctrl still processing\n"); - status = -EINVAL; complete(&htc->ctl_resp); goto out; } @@ -439,9 +412,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, skb = NULL; out: kfree_skb(skb); - - return status; } +EXPORT_SYMBOL(ath10k_htc_rx_completion_handler); static void ath10k_htc_control_rx_complete(struct ath10k *ar, struct sk_buff *skb) @@ -767,9 +739,7 @@ setup: status = ath10k_hif_map_service_to_pipe(htc->ar, ep->service_id, &ep->ul_pipe_id, - &ep->dl_pipe_id, - &ep->ul_is_polled, - &ep->dl_is_polled); + &ep->dl_pipe_id); if (status) return status; @@ -778,10 +748,6 @@ setup: htc_service_name(ep->service_id), ep->ul_pipe_id, ep->dl_pipe_id, ep->eid); - ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot htc ep %d ul polled %d dl polled %d\n", - ep->eid, ep->ul_is_polled, ep->dl_is_polled); - if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { ep->tx_credit_flow_enabled = false; ath10k_dbg(ar, ATH10K_DBG_BOOT, @@ -841,7 +807,6 @@ int ath10k_htc_start(struct ath10k_htc *htc) /* registered target arrival callback from the HIF layer */ int ath10k_htc_init(struct ath10k *ar) { - struct ath10k_hif_cb htc_callbacks; struct ath10k_htc_ep *ep = NULL; struct ath10k_htc *htc = &ar->htc; @@ -849,15 +814,11 @@ int ath10k_htc_init(struct ath10k *ar) ath10k_htc_reset_endpoint_states(htc); - /* setup HIF layer callbacks */ - htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler; - htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler; htc->ar = ar; /* Get HIF default pipe for HTC message exchange */ ep = &htc->endpoint[ATH10K_HTC_EP_0]; - ath10k_hif_set_callbacks(ar, &htc_callbacks); ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id); init_completion(&htc->ctl_resp); diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index 527179c0edce..e70aa38e6e05 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -312,8 +312,6 @@ struct ath10k_htc_ep { int max_ep_message_len; u8 ul_pipe_id; u8 dl_pipe_id; - int ul_is_polled; /* call HIF to get tx completions */ - int dl_is_polled; /* call HIF to fetch rx (not implemented) */ u8 seq_no; /* for debugging */ int tx_credits; @@ -355,5 +353,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, struct sk_buff *packet); struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size); +void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb); +void ath10k_htc_rx_completion_handler(struct 
ath10k *ar, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 5a8e4eae7a9c..2bad50e520b5 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1488,7 +1488,6 @@ struct ath10k_htt { int num_pending_mgmt_tx; struct idr pending_tx; wait_queue_head_t empty_tx_wq; - struct dma_pool *tx_pool; /* set if host-fw communication goes haywire * used to avoid further failures */ @@ -1509,6 +1508,11 @@ struct ath10k_htt { dma_addr_t paddr; struct htt_msdu_ext_desc *vaddr; } frag_desc; + + struct { + dma_addr_t paddr; + struct ath10k_htt_txbuf *vaddr; + } txbuf; }; #define RX_HTT_HDR_STATUS_LEN 64 @@ -1587,6 +1591,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt); int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu); +void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb); void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc); int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 606c1a34f004..6060dda4e910 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2125,6 +2125,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) /* Free the indication buffer */ dev_kfree_skb_any(skb); } +EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); static void ath10k_htt_txrx_compl_task(unsigned long ptr) { diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index eb5ba9bb8b4d..16823970dbfd 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -108,9 +108,12 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) spin_lock_init(&htt->tx_lock); idr_init(&htt->pending_tx); - htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, - sizeof(struct ath10k_htt_txbuf), 4, 0); - if (!htt->tx_pool) { + size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf); + htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, + &htt->txbuf.paddr, + GFP_DMA); + if (!htt->txbuf.vaddr) { + ath10k_err(ar, "failed to alloc tx buffer\n"); ret = -ENOMEM; goto free_idr_pending_tx; } @@ -125,14 +128,17 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) if (!htt->frag_desc.vaddr) { ath10k_warn(ar, "failed to alloc fragment desc memory\n"); ret = -ENOMEM; - goto free_tx_pool; + goto free_txbuf; } skip_frag_desc_alloc: return 0; -free_tx_pool: - dma_pool_destroy(htt->tx_pool); +free_txbuf: + size = htt->max_num_pending_tx * + sizeof(struct ath10k_htt_txbuf); + dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr, + htt->txbuf.paddr); free_idr_pending_tx: idr_destroy(&htt->pending_tx); return ret; @@ -160,7 +166,13 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt) idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); idr_destroy(&htt->pending_tx); - dma_pool_destroy(htt->tx_pool); + + if (htt->txbuf.vaddr) { + size = htt->max_num_pending_tx * + sizeof(struct ath10k_htt_txbuf); + dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr, + htt->txbuf.paddr); + } if (htt->frag_desc.vaddr) { size = htt->max_num_pending_tx * @@ -175,6 +187,12 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) dev_kfree_skb_any(skb); } +void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + 
dev_kfree_skb_any(skb); +} +EXPORT_SYMBOL(ath10k_htt_hif_tx_complete); + int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; @@ -454,9 +472,9 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); - if (res < 0) { + if (res < 0) goto err_tx_dec; - } + msdu_id = res; txdesc = ath10k_htc_alloc_skb(ar, len); @@ -521,7 +539,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) int res; u8 flags0 = 0; u16 msdu_id, flags1 = 0; - dma_addr_t paddr = 0; u32 frags_paddr = 0; struct htt_msdu_ext_desc *ext_desc = NULL; bool limit_mgmt_desc = false; @@ -542,21 +559,17 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); - if (res < 0) { + if (res < 0) goto err_tx_dec; - } + msdu_id = res; prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); - skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, - &paddr); - if (!skb_cb->htt.txbuf) { - res = -ENOMEM; - goto err_free_msdu_id; - } - skb_cb->htt.txbuf_paddr = paddr; + skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id]; + skb_cb->htt.txbuf_paddr = htt->txbuf.paddr + + (sizeof(struct ath10k_htt_txbuf) * msdu_id); if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || @@ -574,7 +587,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) res = dma_mapping_error(dev, skb_cb->paddr); if (res) { res = -EIO; - goto err_free_txbuf; + goto err_free_msdu_id; } switch (skb_cb->txmode) { @@ -706,10 +719,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); -err_free_txbuf: - dma_pool_free(htt->tx_pool, - skb_cb->htt.txbuf, - skb_cb->htt.txbuf_paddr); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index bc421a5c5356..2d87737e35ff 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -97,6 +97,9 @@ enum qca6174_chip_id_rev { /* includes also the null byte */ #define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K" +#define ATH10K_BOARD_MAGIC "QCA-ATH10K-BOARD" + +#define ATH10K_BOARD_API2_FILE "board-2.bin" #define REG_DUMP_COUNT_QCA988X 60 @@ -159,6 +162,16 @@ enum ath10k_fw_htt_op_version { ATH10K_FW_HTT_OP_VERSION_MAX, }; +enum ath10k_bd_ie_type { + /* contains sub IEs of enum ath10k_bd_ie_board_type */ + ATH10K_BD_IE_BOARD = 0, +}; + +enum ath10k_bd_ie_board_type { + ATH10K_BD_IE_BOARD_NAME = 0, + ATH10K_BD_IE_BOARD_DATA = 1, +}; + enum ath10k_hw_rev { ATH10K_HW_QCA988X, ATH10K_HW_QCA6174, @@ -337,7 +350,7 @@ enum ath10k_hw_rate_cck { #define TARGET_10X_MAX_FRAG_ENTRIES 0 /* 10.2 parameters */ -#define TARGET_10_2_DMA_BURST_SIZE 1 +#define TARGET_10_2_DMA_BURST_SIZE 0 /* Target specific defines for WMI-TLV firmware */ #define TARGET_TLV_NUM_VDEVS 4 @@ -391,7 +404,7 @@ enum ath10k_hw_rate_cck { #define TARGET_10_4_TX_DBG_LOG_SIZE 1024 #define TARGET_10_4_NUM_WDS_ENTRIES 32 -#define TARGET_10_4_DMA_BURST_SIZE 1 +#define TARGET_10_4_DMA_BURST_SIZE 0 #define TARGET_10_4_MAC_AGGR_DELIM 0 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 #define TARGET_10_4_VOW_CONFIG 0 diff --git a/drivers/net/wireless/ath/ath10k/mac.c 
b/drivers/net/wireless/ath/ath10k/mac.c index 79490ad41ac5..484c1a10372f 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -197,9 +197,8 @@ static int ath10k_send_key(struct ath10k_vif *arvif, return -EOPNOTSUPP; } - if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - } if (cmd == DISABLE_KEY) { arg.key_cipher = WMI_CIPHER_NONE; @@ -1111,7 +1110,8 @@ static int ath10k_monitor_recalc(struct ath10k *ar) ret = ath10k_monitor_stop(ar); if (ret) - ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", ret); + ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", + ret); /* not serious */ } @@ -2084,7 +2084,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, enum ieee80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; - int i, n, max_nss; + int i, n; + u8 max_nss; u32 stbc; lockdep_assert_held(&ar->conf_mutex); @@ -2169,7 +2170,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, arg->peer_ht_rates.rates[i] = i; } else { arg->peer_ht_rates.num_rates = n; - arg->peer_num_spatial_streams = max_nss; + arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); } ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", @@ -4065,6 +4066,7 @@ static u32 get_nss_from_chainmask(u16 chain_mask) static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) { int nsts = ar->vht_cap_info; + nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; @@ -4081,8 +4083,9 @@ static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar) { int sound_dim = ar->vht_cap_info; + sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; - sound_dim >>=IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; + sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; /* If the sounding dimension is not advertised by the firmware, * let's use a default value of 1 @@ -4656,7 +4659,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, info->use_cts_prot ? 
1 : 0); if (ret) ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n", - info->use_cts_prot, arvif->vdev_id, ret); + info->use_cts_prot, arvif->vdev_id, ret); } if (changed & BSS_CHANGED_ERP_SLOT) { @@ -6268,8 +6271,8 @@ ath10k_mac_update_rx_channel(struct ath10k *ar, rcu_read_lock(); if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) { ieee80211_iter_chan_contexts_atomic(ar->hw, - ath10k_mac_get_any_chandef_iter, - &def); + ath10k_mac_get_any_chandef_iter, + &def); if (vifs) def = &vifs[0].new_ctx->def; @@ -7301,7 +7304,7 @@ int ath10k_mac_register(struct ath10k *ar) ath10k_reg_notifier); if (ret) { ath10k_err(ar, "failed to initialise regulatory: %i\n", ret); - goto err_free; + goto err_dfs_detector_exit; } ar->hw->wiphy->cipher_suites = cipher_suites; @@ -7310,7 +7313,7 @@ int ath10k_mac_register(struct ath10k *ar) ret = ieee80211_register_hw(ar->hw); if (ret) { ath10k_err(ar, "failed to register ieee80211: %d\n", ret); - goto err_free; + goto err_dfs_detector_exit; } if (!ath_is_world_regd(&ar->ath_common.regulatory)) { @@ -7324,10 +7327,16 @@ int ath10k_mac_register(struct ath10k *ar) err_unregister: ieee80211_unregister_hw(ar->hw); + +err_dfs_detector_exit: + if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) + ar->dfs_detector->exit(ar->dfs_detector); + err_free: kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); + SET_IEEE80211_DEV(ar->hw, NULL); return ret; } diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 110fcad609b9..5c05b0cf54a1 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -104,6 +104,10 @@ static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, struct ath10k_ce_pipe *rx_pipe, struct bmi_xfer *xfer); static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); +static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); static const struct ce_attr host_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ @@ -112,6 +116,7 @@ static const struct ce_attr host_ce_config_wlan[] = { .src_nentries = 16, .src_sz_max = 256, .dest_nentries = 0, + .send_cb = ath10k_pci_htc_tx_cb, }, /* CE1: target->host HTT + HTC control */ @@ -120,6 +125,7 @@ static const struct ce_attr host_ce_config_wlan[] = { .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, + .recv_cb = ath10k_pci_htc_rx_cb, }, /* CE2: target->host WMI */ @@ -128,6 +134,7 @@ static const struct ce_attr host_ce_config_wlan[] = { .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 128, + .recv_cb = ath10k_pci_htc_rx_cb, }, /* CE3: host->target WMI */ @@ -136,6 +143,7 @@ static const struct ce_attr host_ce_config_wlan[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath10k_pci_htc_tx_cb, }, /* CE4: host->target HTT */ @@ -144,14 +152,16 @@ static const struct ce_attr host_ce_config_wlan[] = { .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, .src_sz_max = 256, .dest_nentries = 0, + .send_cb = ath10k_pci_htt_tx_cb, }, - /* CE5: unused */ + /* CE5: target->host HTT (HIF->HTT) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, - .src_sz_max = 0, - .dest_nentries = 0, + .src_sz_max = 512, + .dest_nentries = 512, + .recv_cb = ath10k_pci_htt_rx_cb, }, /* CE6: target autonomous hif_memcpy */ 
@@ -257,12 +267,12 @@ static const struct ce_pipe_config target_ce_config_wlan[] = { /* NB: 50% of src nentries, since tx has 2 frags */ - /* CE5: unused */ + /* CE5: target->host HTT (HIF->HTT) */ { .pipenum = __cpu_to_le32(5), - .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .pipedir = __cpu_to_le32(PIPEDIR_IN), .nentries = __cpu_to_le32(32), - .nbytes_max = __cpu_to_le32(2048), + .nbytes_max = __cpu_to_le32(512), .flags = __cpu_to_le32(CE_ATTR_FLAGS), .reserved = __cpu_to_le32(0), }, @@ -396,7 +406,7 @@ static const struct service_to_pipe target_service_to_ce_map_wlan[] = { { __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ - __cpu_to_le32(1), + __cpu_to_le32(5), }, /* (Additions here) */ @@ -452,8 +462,12 @@ static int ath10k_pci_wake_wait(struct ath10k *ar) int curr_delay = 5; while (tot_delay < PCIE_WAKE_TIMEOUT) { - if (ath10k_pci_is_awake(ar)) + if (ath10k_pci_is_awake(ar)) { + if (tot_delay > PCIE_WAKE_LATE_US) + ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n", + tot_delay / 1000); return 0; + } udelay(curr_delay); tot_delay += curr_delay; @@ -465,12 +479,53 @@ static int ath10k_pci_wake_wait(struct ath10k *ar) return -ETIMEDOUT; } +static int ath10k_pci_force_wake(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + + if (!ar_pci->ps_awake) { + iowrite32(PCIE_SOC_WAKE_V_MASK, + ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS); + + ret = ath10k_pci_wake_wait(ar); + if (ret == 0) + ar_pci->ps_awake = true; + } + + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); + + return ret; +} + +static void ath10k_pci_force_sleep(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + + iowrite32(PCIE_SOC_WAKE_RESET, + ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS); + ar_pci->ps_awake = false; + + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); +} + static int ath10k_pci_wake(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long flags; int ret = 0; + if (ar_pci->pci_ps == 0) + return ret; + spin_lock_irqsave(&ar_pci->ps_lock, flags); ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n", @@ -502,6 +557,9 @@ static void ath10k_pci_sleep(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long flags; + if (ar_pci->pci_ps == 0) + return; + spin_lock_irqsave(&ar_pci->ps_lock, flags); ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n", @@ -544,6 +602,11 @@ static void ath10k_pci_sleep_sync(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long flags; + if (ar_pci->pci_ps == 0) { + ath10k_pci_force_sleep(ar); + return; + } + del_timer_sync(&ar_pci->ps_timer); spin_lock_irqsave(&ar_pci->ps_lock, flags); @@ -682,8 +745,6 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) dma_addr_t paddr; int ret; - lockdep_assert_held(&ar_pci->ce_lock); - skb = dev_alloc_skb(pipe->buf_sz); if (!skb) return -ENOMEM; @@ -701,9 +762,10 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) ATH10K_SKB_RXCB(skb)->paddr = paddr; + spin_lock_bh(&ar_pci->ce_lock); ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); + spin_unlock_bh(&ar_pci->ce_lock); if (ret) { - ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); dma_unmap_single(ar->dev, 
paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); @@ -713,25 +775,27 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) return 0; } -static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) +static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) { struct ath10k *ar = pipe->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; int ret, num; - lockdep_assert_held(&ar_pci->ce_lock); - if (pipe->buf_sz == 0) return; if (!ce_pipe->dest_ring) return; + spin_lock_bh(&ar_pci->ce_lock); num = __ath10k_ce_rx_num_free_bufs(ce_pipe); + spin_unlock_bh(&ar_pci->ce_lock); while (num--) { ret = __ath10k_pci_rx_post_buf(pipe); if (ret) { + if (ret == -ENOSPC) + break; ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); mod_timer(&ar_pci->rx_post_retry, jiffies + ATH10K_PCI_RX_POST_RETRY_MS); @@ -740,25 +804,13 @@ static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) } } -static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) -{ - struct ath10k *ar = pipe->hif_ce_state; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - spin_lock_bh(&ar_pci->ce_lock); - __ath10k_pci_rx_post_pipe(pipe); - spin_unlock_bh(&ar_pci->ce_lock); -} - static void ath10k_pci_rx_post(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int i; - spin_lock_bh(&ar_pci->ce_lock); for (i = 0; i < CE_COUNT; i++) - __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); - spin_unlock_bh(&ar_pci->ce_lock); + ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); } static void ath10k_pci_rx_replenish_retry(unsigned long ptr) @@ -1102,11 +1154,9 @@ static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) } /* Called by lower (CE) layer when a send to Target completes. */ -static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) +static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; struct sk_buff_head list; struct sk_buff *skb; u32 ce_data; @@ -1124,16 +1174,16 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) } while ((skb = __skb_dequeue(&list))) - cb->tx_completion(ar, skb); + ath10k_htc_tx_completion_handler(ar, skb); } -/* Called by lower (CE) layer when data is received from the Target. */ -static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) +static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state, + void (*callback)(struct ath10k *ar, + struct sk_buff *skb)) { struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; - struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; struct sk_buff *skb; struct sk_buff_head list; void *transfer_context; @@ -1168,12 +1218,56 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", skb->data, skb->len); - cb->rx_completion(ar, skb); + callback(ar, skb); } ath10k_pci_rx_post_pipe(pipe_info); } +/* Called by lower (CE) layer when data is received from the Target. */ +static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +/* Called by lower (CE) layer when a send to HTT Target completes. 
*/ +static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) +{ + struct ath10k *ar = ce_state->ar; + struct sk_buff *skb; + u32 ce_data; + unsigned int nbytes; + unsigned int transfer_id; + + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data, + &nbytes, &transfer_id) == 0) { + /* no need to call tx completion for NULL pointers */ + if (!skb) + continue; + + dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, + skb->len, DMA_TO_DEVICE); + ath10k_htt_hif_tx_complete(ar, skb); + } +} + +static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) +{ + skb_pull(skb, sizeof(struct ath10k_htc_hdr)); + ath10k_htt_t2h_msg_handler(ar, skb); +} + +/* Called by lower (CE) layer when HTT data is received from the Target. */ +static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + /* CE4 polling needs to be done whenever CE pipe which transports + * HTT Rx (target->host) is processed. + */ + ath10k_ce_per_engine_service(ce_state->ar, 4); + + ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); +} + static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items) { @@ -1343,17 +1437,6 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, ath10k_ce_per_engine_service(ar, pipe); } -static void ath10k_pci_hif_set_callbacks(struct ath10k *ar, - struct ath10k_hif_cb *callbacks) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n"); - - memcpy(&ar_pci->msg_callbacks_current, callbacks, - sizeof(ar_pci->msg_callbacks_current)); -} - static void ath10k_pci_kill_tasklet(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -1368,10 +1451,8 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar) del_timer_sync(&ar_pci->rx_post_retry); } -static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, - u16 service_id, u8 *ul_pipe, - u8 *dl_pipe, int *ul_is_polled, - int *dl_is_polled) +static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) { const struct service_to_pipe *entry; bool ul_set = false, dl_set = false; @@ -1379,9 +1460,6 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); - /* polling for received messages not supported */ - *dl_is_polled = 0; - for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { entry = &target_service_to_ce_map_wlan[i]; @@ -1415,25 +1493,17 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, if (WARN_ON(!ul_set || !dl_set)) return -ENOENT; - *ul_is_polled = - (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0; - return 0; } static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe) { - int ul_is_polled, dl_is_polled; - ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); (void)ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_RSVD_CTRL, - ul_pipe, - dl_pipe, - &ul_is_polled, - &dl_is_polled); + ul_pipe, dl_pipe); } static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) @@ -1504,6 +1574,7 @@ static void ath10k_pci_irq_enable(struct ath10k *ar) static int ath10k_pci_hif_start(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); ath10k_pci_irq_enable(ar); @@ -1579,7 +1650,7 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 
ce_ring->per_transfer_context[i] = NULL; - ar_pci->msg_callbacks_current.tx_completion(ar, skb); + ath10k_htc_tx_completion_handler(ar, skb); } } @@ -1999,9 +2070,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar) pipe->pipe_num = i; pipe->hif_ce_state = ar; - ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i], - ath10k_pci_ce_send_done, - ath10k_pci_ce_recv_data); + ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", i, ret); @@ -2257,7 +2326,7 @@ static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar) ret = ath10k_pci_wait_for_target_init(ar); if (ret) { ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", - ret); + ret); return ret; } @@ -2397,6 +2466,15 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; u32 val; + int ret = 0; + + if (ar_pci->pci_ps == 0) { + ret = ath10k_pci_force_wake(ar); + if (ret) { + ath10k_err(ar, "failed to wake up target: %d\n", ret); + return ret; + } + } /* Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries @@ -2407,7 +2485,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - return 0; + return ret; } #endif @@ -2421,7 +2499,6 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, .get_default_pipe = ath10k_pci_hif_get_default_pipe, .send_complete_check = ath10k_pci_hif_send_complete_check, - .set_callbacks = ath10k_pci_hif_set_callbacks, .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, .power_up = ath10k_pci_hif_power_up, .power_down = ath10k_pci_hif_power_down, @@ -2501,6 +2578,16 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) { struct ath10k *ar = arg; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + if (ar_pci->pci_ps == 0) { + ret = ath10k_pci_force_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake device up on irq: %d\n", + ret); + return IRQ_NONE; + } + } if (ar_pci->num_msi_intrs == 0) { if (!ath10k_pci_irq_pending(ar)) @@ -2900,17 +2987,21 @@ static int ath10k_pci_probe(struct pci_dev *pdev, struct ath10k_pci *ar_pci; enum ath10k_hw_rev hw_rev; u32 chip_id; + bool pci_ps; switch (pci_dev->device) { case QCA988X_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA988X; + pci_ps = false; break; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: hw_rev = ATH10K_HW_QCA6174; + pci_ps = true; break; case QCA99X0_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA99X0; + pci_ps = false; break; default: WARN_ON(1); @@ -2924,19 +3015,21 @@ static int ath10k_pci_probe(struct pci_dev *pdev, return -ENOMEM; } - ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", + pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); ar_pci = ath10k_pci_priv(ar); ar_pci->pdev = pdev; ar_pci->dev = &pdev->dev; ar_pci->ar = ar; ar->dev_id = pci_dev->device; + ar_pci->pci_ps = pci_ps; - if (pdev->subsystem_vendor || pdev->subsystem_device) - scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id), - "%04x:%04x:%04x:%04x", - pdev->vendor, pdev->device, - pdev->subsystem_vendor, pdev->subsystem_device); + ar->id.vendor = pdev->vendor; + ar->id.device = pdev->device; + ar->id.subsystem_vendor = pdev->subsystem_vendor; + 
ar->id.subsystem_device = pdev->subsystem_device; spin_lock_init(&ar_pci->ce_lock); spin_lock_init(&ar_pci->ps_lock); @@ -2962,6 +3055,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ath10k_pci_ce_deinit(ar); ath10k_pci_irq_disable(ar); + if (ar_pci->pci_ps == 0) { + ret = ath10k_pci_force_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake up device : %d\n", ret); + goto err_free_pipes; + } + } + ret = ath10k_pci_init_irq(ar); if (ret) { ath10k_err(ar, "failed to init irqs: %d\n", ret); @@ -3090,13 +3191,16 @@ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA6174 2.1 firmware files */ MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA6174 3.1 firmware files */ MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 8d364fb8f743..f91bf333cb75 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -175,8 +175,6 @@ struct ath10k_pci { struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX]; - struct ath10k_hif_cb msg_callbacks_current; - /* Copy Engine used for Diagnostic Accesses */ struct ath10k_ce_pipe *ce_diag; @@ -221,6 +219,12 @@ struct ath10k_pci { * powersave register state changes. */ bool ps_awake; + + /* pci power save, disable for QCA988X and QCA99X0. + * Writing 'false' to this variable avoids frequent locking + * on MMIO read/write. 
+ */ + bool pci_ps; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) @@ -230,7 +234,8 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) #define ATH10K_PCI_RX_POST_RETRY_MS 50 #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ -#define PCIE_WAKE_TIMEOUT 10000 /* 10ms */ +#define PCIE_WAKE_TIMEOUT 30000 /* 30ms */ +#define PCIE_WAKE_LATE_US 10000 /* 10ms */ #define BAR_NUM 0 diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c index 1a899d70dc5d..60fe562e3041 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.c +++ b/drivers/net/wireless/ath/ath10k/thermal.c @@ -215,6 +215,6 @@ err_cooling_destroy: void ath10k_thermal_unregister(struct ath10k *ar) { - thermal_cooling_device_unregister(ar->thermal.cdev); sysfs_remove_link(&ar->dev->kobj, "cooling_device"); + thermal_cooling_device_unregister(ar->thermal.cdev); } diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 7db7d501726b..6d1105ab4592 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -92,11 +92,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, skb_cb = ATH10K_SKB_CB(msdu); dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); - if (skb_cb->htt.txbuf) - dma_pool_free(htt->tx_pool, - skb_cb->htt.txbuf, - skb_cb->htt.txbuf_paddr); - ath10k_report_offchan_tx(htt->ar, msdu); info = IEEE80211_SKB_CB(msdu); diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 248ffc3d6620..b54aa08cb25c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -177,6 +177,11 @@ struct wmi_ops { const struct wmi_tdls_peer_capab_arg *cap, const struct wmi_channel_arg *chan); struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable); + struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar, + u32 param); + void (*fw_stats_fill)(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf); }; int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); @@ -1270,4 +1275,31 @@ ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable) return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid); } +static inline int +ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_get_tpc_config) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_get_tpc_config_cmdid); +} + +static inline int +ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats, + char *buf) +{ + if (!ar->wmi.ops->fw_stats_fill) + return -EOPNOTSUPP; + + ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf); + return 0; +} #endif diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index b5849b3fd2f0..8f835480a62c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3468,6 +3468,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state, .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update, .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs, + .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, }; /************/ diff --git a/drivers/net/wireless/ath/ath10k/wmi.c 
b/drivers/net/wireless/ath/ath10k/wmi.c index 87d9de2aa8c5..6e7d7a7f6a97 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -3018,8 +3018,6 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, memcpy(skb_put(bcn, arvif->u.ap.noa_len), arvif->u.ap.noa_data, arvif->u.ap.noa_len); - - return; } static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, @@ -3507,7 +3505,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar, tsf); if (res < 0) { ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n", - res); + res); return; } break; @@ -3835,9 +3833,258 @@ void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); } +static u8 ath10k_tpc_config_get_rate(struct ath10k *ar, + struct wmi_pdev_tpc_config_event *ev, + u32 rate_idx, u32 num_chains, + u32 rate_code, u8 type) +{ + u8 tpc, num_streams, preamble, ch, stm_idx; + + num_streams = ATH10K_HW_NSS(rate_code); + preamble = ATH10K_HW_PREAMBLE(rate_code); + ch = num_chains - 1; + + tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]); + + if (__le32_to_cpu(ev->num_tx_chain) <= 1) + goto out; + + if (preamble == WMI_RATE_PREAMBLE_CCK) + goto out; + + stm_idx = num_streams - 1; + if (num_chains <= num_streams) + goto out; + + switch (type) { + case WMI_TPC_TABLE_TYPE_STBC: + tpc = min_t(u8, tpc, + ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]); + break; + case WMI_TPC_TABLE_TYPE_TXBF: + tpc = min_t(u8, tpc, + ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]); + break; + case WMI_TPC_TABLE_TYPE_CDD: + tpc = min_t(u8, tpc, + ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]); + break; + default: + ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type); + tpc = 0; + break; + } + +out: + return tpc; +} + +static void ath10k_tpc_config_disp_tables(struct ath10k *ar, + struct wmi_pdev_tpc_config_event *ev, + struct ath10k_tpc_stats *tpc_stats, + u8 *rate_code, u16 *pream_table, u8 type) +{ + u32 i, j, pream_idx, flags; + u8 tpc[WMI_TPC_TX_N_CHAIN]; + char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE]; + char buff[WMI_TPC_BUF_SIZE]; + + flags = __le32_to_cpu(ev->flags); + + switch (type) { + case WMI_TPC_TABLE_TYPE_CDD: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + case WMI_TPC_TABLE_TYPE_STBC: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + case WMI_TPC_TABLE_TYPE_TXBF: + if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n"); + tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG; + return; + } + break; + default: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "invalid table type in wmi tpc event: %d\n", type); + return; + } + + pream_idx = 0; + for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) { + memset(tpc_value, 0, sizeof(tpc_value)); + memset(buff, 0, sizeof(buff)); + if (i == pream_table[pream_idx]) + pream_idx++; + + for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) { + if (j >= __le32_to_cpu(ev->num_tx_chain)) + break; + + tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1, + rate_code[i], + type); + snprintf(buff, sizeof(buff), "%8d ", tpc[j]); + strncat(tpc_value, buff, strlen(buff)); + } + 
tpc_stats->tpc_table[type].pream_idx[i] = pream_idx; + tpc_stats->tpc_table[type].rate_code[i] = rate_code[i]; + memcpy(tpc_stats->tpc_table[type].tpc_value[i], + tpc_value, sizeof(tpc_value)); + } +} + void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); + u32 i, j, pream_idx, num_tx_chain; + u8 rate_code[WMI_TPC_RATE_MAX], rate_idx; + u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; + struct wmi_pdev_tpc_config_event *ev; + struct ath10k_tpc_stats *tpc_stats; + + ev = (struct wmi_pdev_tpc_config_event *)skb->data; + + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); + if (!tpc_stats) + return; + + /* Create the rate code table based on the chains supported */ + rate_idx = 0; + pream_idx = 0; + + /* Fill CCK rate code */ + for (i = 0; i < 4; i++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK); + rate_idx++; + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill OFDM rate code */ + for (i = 0; i < 8; i++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM); + rate_idx++; + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + + /* Fill HT20 rate code */ + for (i = 0; i < num_tx_chain; i++) { + for (j = 0; j < 8; j++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT); + rate_idx++; + } + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill HT40 rate code */ + for (i = 0; i < num_tx_chain; i++) { + for (j = 0; j < 8; j++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT); + rate_idx++; + } + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill VHT20 rate code */ + for (i = 0; i < __le32_to_cpu(ev->num_tx_chain); i++) { + for (j = 0; j < 10; j++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT); + rate_idx++; + } + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill VHT40 rate code */ + for (i = 0; i < num_tx_chain; i++) { + for (j = 0; j < 10; j++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT); + rate_idx++; + } + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + /* Fill VHT80 rate code */ + for (i = 0; i < num_tx_chain; i++) { + for (j = 0; j < 10; j++) { + rate_code[rate_idx] = + ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT); + rate_idx++; + } + } + pream_table[pream_idx] = rate_idx; + pream_idx++; + + rate_code[rate_idx++] = + ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK); + rate_code[rate_idx++] = + ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM); + rate_code[rate_idx++] = + ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK); + rate_code[rate_idx++] = + ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM); + rate_code[rate_idx++] = + ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM); + + pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END; + + tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq); + tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode); + tpc_stats->ctl = __le32_to_cpu(ev->ctl); + tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain); + tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain); + tpc_stats->twice_antenna_reduction = + __le32_to_cpu(ev->twice_antenna_reduction); + tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); + tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); + tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + tpc_stats->rate_max = 
__le32_to_cpu(ev->rate_max); + + ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_CDD); + ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_STBC); + ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, + rate_code, pream_table, + WMI_TPC_TABLE_TYPE_TXBF); + + ath10k_debug_tpc_stats_process(ar, tpc_stats); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n", + __le32_to_cpu(ev->chan_freq), + __le32_to_cpu(ev->phy_mode), + __le32_to_cpu(ev->ctl), + __le32_to_cpu(ev->reg_domain), + a_sle32_to_cpu(ev->twice_antenna_gain), + __le32_to_cpu(ev->twice_antenna_reduction), + __le32_to_cpu(ev->power_limit), + __le32_to_cpu(ev->twice_max_rd_power) / 2, + __le32_to_cpu(ev->num_tx_chain), + __le32_to_cpu(ev->rate_max)); } void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb) @@ -5090,7 +5337,7 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar) config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI); - config.rx_decap_mode = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE); + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS); config.bmiss_offload_max_vdev = __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV); @@ -6356,6 +6603,399 @@ ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, return skb; } +static struct sk_buff * +ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param) +{ + struct wmi_pdev_get_tpc_config_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data; + cmd->param = __cpu_to_le32(param); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev get tcp config param:%d\n", param); + return skb; +} + +size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head) +{ + struct ath10k_fw_stats_peer *i; + size_t num = 0; + + list_for_each_entry(i, head, list) + ++num; + + return num; +} + +size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head) +{ + struct ath10k_fw_stats_vdev *i; + size_t num = 0; + + list_for_each_entry(i, head, list) + ++num; + + return num; +} + +static void +ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s\n", + "ath10k PDEV stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Channel noise floor", pdev->ch_noise_floor); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Channel TX power", pdev->chan_tx_power); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "TX frame count", pdev->tx_frame_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RX frame count", pdev->rx_frame_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RX clear count", pdev->rx_clear_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Cycle count", pdev->cycle_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "PHY error count", 
pdev->phy_err_count); + + *length = len; +} + +static void +ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RTS bad count", pdev->rts_bad); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RTS good count", pdev->rts_good); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "FCS bad count", pdev->fcs_bad); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "No beacon count", pdev->no_beacons); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "MIB int count", pdev->mib_int_count); + + len += scnprintf(buf + len, buf_len - len, "\n"); + *length = len; +} + +static void +ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "\n%30s\n", + "ath10k PDEV TX stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HTT cookies queued", pdev->comp_queued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HTT cookies disp.", pdev->comp_delivered); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDU queued", pdev->msdu_enqued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDU queued", pdev->mpdu_enqued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDUs dropped", pdev->wmm_drop); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Local enqued", pdev->local_enqued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Local freed", pdev->local_freed); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HW queued", pdev->hw_queued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PPDUs reaped", pdev->hw_reaped); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Num underruns", pdev->underrun); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PPDUs cleaned", pdev->tx_abort); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs requed", pdev->mpdus_requed); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Excessive retries", pdev->tx_ko); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HW rate", pdev->data_rc); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Sched self tiggers", pdev->self_triggers); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Dropped due to SW retries", + pdev->sw_retry_failure); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Illegal rate phy errors", + pdev->illgl_rate_phy_err); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Pdev continuous xretry", pdev->pdev_cont_xretry); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "TX timeout", pdev->pdev_tx_timeout); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PDEV resets", pdev->pdev_resets); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PHY underrun", pdev->phy_underrun); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDU is more than txop limit", pdev->txop_ovf); + *length = len; +} + +static void +ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = 
ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "\n%30s\n", + "ath10k PDEV RX stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Mid PPDU route change", + pdev->mid_ppdu_route_change); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Tot. number of statuses", pdev->status_rcvd); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 0", pdev->r0_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 1", pdev->r1_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 2", pdev->r2_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 3", pdev->r3_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDUs delivered to HTT", pdev->htt_msdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs delivered to HTT", pdev->htt_mpdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDUs delivered to stack", pdev->loc_msdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs delivered to stack", pdev->loc_mpdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Oversized AMSUs", pdev->oversize_amsdu); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PHY errors", pdev->phy_errs); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PHY errors drops", pdev->phy_err_drop); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); + *length = len; +} + +static void +ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + int i; + + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "vdev id", vdev->vdev_id); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "beacon snr", vdev->beacon_snr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "data snr", vdev->data_snr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx frames", vdev->num_rx_frames); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rts fail", vdev->num_rts_fail); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rts success", vdev->num_rts_success); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx err", vdev->num_rx_err); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx discard", vdev->num_rx_discard); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num tx not acked", vdev->num_tx_not_acked); + + for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames", i, + vdev->num_tx_frames[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames retries", i, + vdev->num_tx_frames_retries[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames failures", i, + vdev->num_tx_frames_failures[i]); + + for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] 0x%08x\n", + "tx rate history", i, + vdev->tx_rate_history[i]); + + for (i = 0 ; i < 
ARRAY_SIZE(vdev->beacon_rssi_history); i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "beacon rssi history", i, + vdev->beacon_rssi_history[i]); + + len += scnprintf(buf + len, buf_len - len, "\n"); + *length = len; +} + +static void +ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", + "Peer MAC address", peer->peer_macaddr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "Peer RSSI", peer->peer_rssi); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "Peer TX rate", peer->peer_tx_rate); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "Peer RX rate", peer->peer_rx_rate); + len += scnprintf(buf + len, buf_len - len, "\n"); + *length = len; +} + +void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf) +{ + u32 len = 0; + u32 buf_len = ATH10K_FW_STATS_BUF_SIZE; + const struct ath10k_fw_stats_pdev *pdev; + const struct ath10k_fw_stats_vdev *vdev; + const struct ath10k_fw_stats_peer *peer; + size_t num_peers; + size_t num_vdevs; + + spin_lock_bh(&ar->data_lock); + + pdev = list_first_entry_or_null(&fw_stats->pdevs, + struct ath10k_fw_stats_pdev, list); + if (!pdev) { + ath10k_warn(ar, "failed to get pdev stats\n"); + goto unlock; + } + + num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers); + num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs); + + ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k VDEV stats", num_vdevs); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(vdev, &fw_stats->vdevs, list) { + ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k PEER stats", num_peers); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(peer, &fw_stats->peers, list) { + ath10k_wmi_fw_peer_stats_fill(peer, buf, &len); + } + +unlock: + spin_unlock_bh(&ar->data_lock); + + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; +} + +void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf) +{ + unsigned int len = 0; + unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE; + const struct ath10k_fw_stats_pdev *pdev; + const struct ath10k_fw_stats_vdev *vdev; + const struct ath10k_fw_stats_peer *peer; + size_t num_peers; + size_t num_vdevs; + + spin_lock_bh(&ar->data_lock); + + pdev = list_first_entry_or_null(&fw_stats->pdevs, + struct ath10k_fw_stats_pdev, list); + if (!pdev) { + ath10k_warn(ar, "failed to get pdev stats\n"); + goto unlock; + } + + num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers); + num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs); + + ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); + ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + 
"ath10k VDEV stats", num_vdevs); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(vdev, &fw_stats->vdevs, list) { + ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k PEER stats", num_peers); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(peer, &fw_stats->peers, list) { + ath10k_wmi_fw_peer_stats_fill(peer, buf, &len); + } + +unlock: + spin_unlock_bh(&ar->data_lock); + + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; +} + static const struct wmi_ops wmi_ops = { .rx = ath10k_wmi_op_rx, .map_svc = wmi_main_svc_map, @@ -6414,6 +7054,7 @@ static const struct wmi_ops wmi_ops = { .gen_addba_send = ath10k_wmi_op_gen_addba_send, .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, + .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -6479,6 +7120,7 @@ static const struct wmi_ops wmi_10_1_ops = { .gen_addba_send = ath10k_wmi_op_gen_addba_send, .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, + .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -6545,6 +7187,7 @@ static const struct wmi_ops wmi_10_2_ops = { .gen_addba_send = ath10k_wmi_op_gen_addba_send, .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, + .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, }; static const struct wmi_ops wmi_10_2_4_ops = { @@ -6606,6 +7249,8 @@ static const struct wmi_ops wmi_10_2_4_ops = { .gen_addba_send = ath10k_wmi_op_gen_addba_send, .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, + .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config, + .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 3e5a1591f772..6e84d1c1a6ca 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -73,6 +73,25 @@ struct wmi_cmd_hdr { #define HTC_PROTOCOL_VERSION 0x0002 #define WMI_PROTOCOL_VERSION 0x0002 +/* + * There is no signed version of __le32, so for a temporary solution come + * up with our own version. The idea is from fs/ntfs/types.h. + * + * Use a_ prefix so that it doesn't conflict if we get proper support to + * linux/types.h. 
+ */ +typedef __s32 __bitwise a_sle32; + +static inline a_sle32 a_cpu_to_sle32(s32 val) +{ + return (__force a_sle32)cpu_to_le32(val); +} + +static inline s32 a_sle32_to_cpu(a_sle32 val) +{ + return le32_to_cpu((__force __le32)val); +} + enum wmi_service { WMI_SERVICE_BEACON_OFFLOAD = 0, WMI_SERVICE_SCAN_OFFLOAD, @@ -3642,8 +3661,18 @@ struct wmi_pdev_get_tpc_config_cmd { __le32 param; } __packed; +#define WMI_TPC_CONFIG_PARAM 1 #define WMI_TPC_RATE_MAX 160 #define WMI_TPC_TX_N_CHAIN 4 +#define WMI_TPC_PREAM_TABLE_MAX 10 +#define WMI_TPC_FLAG 3 +#define WMI_TPC_BUF_SIZE 10 + +enum wmi_tpc_table_type { + WMI_TPC_TABLE_TYPE_CDD = 0, + WMI_TPC_TABLE_TYPE_STBC = 1, + WMI_TPC_TABLE_TYPE_TXBF = 2, +}; enum wmi_tpc_config_event_flag { WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1, @@ -3657,7 +3686,7 @@ struct wmi_pdev_tpc_config_event { __le32 phy_mode; __le32 twice_antenna_reduction; __le32 twice_max_rd_power; - s32 twice_antenna_gain; + a_sle32 twice_antenna_gain; __le32 power_limit; __le32 rate_max; __le32 num_tx_chain; @@ -4253,6 +4282,11 @@ enum wmi_rate_preamble { WMI_RATE_PREAMBLE_VHT, }; +#define ATH10K_HW_NSS(rate) (1 + (((rate) >> 4) & 0x3)) +#define ATH10K_HW_PREAMBLE(rate) (((rate) >> 6) & 0x3) +#define ATH10K_HW_RATECODE(rate, nss, preamble) \ + (((preamble) << 6) | ((nss) << 4) | (rate)) + /* Value to disable fixed rate setting */ #define WMI_FIXED_RATE_NONE (0xff) @@ -6064,6 +6098,7 @@ struct ath10k; struct ath10k_vif; struct ath10k_fw_stats_pdev; struct ath10k_fw_stats_peer; +struct ath10k_fw_stats; int ath10k_wmi_attach(struct ath10k *ar); void ath10k_wmi_detach(struct ath10k *ar); @@ -6145,4 +6180,13 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf, int left_len, struct wmi_phyerr_ev_arg *arg); +void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf); +void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf); +size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head); +size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head); + #endif /* _WMI_H_ */ diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 3fda750db2a9..76b682e566ab 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -2217,7 +2217,7 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) /* enter / leave wow suspend on first vif always */ first_vif = ath6kl_vif_first(ar); - if (WARN_ON(unlikely(!first_vif)) || + if (WARN_ON(!first_vif) || !ath6kl_cfg80211_ready(first_vif)) return -EIO; @@ -2297,7 +2297,7 @@ static int ath6kl_wow_resume(struct ath6kl *ar) int ret; vif = ath6kl_vif_first(ar); - if (WARN_ON(unlikely(!vif)) || + if (WARN_ON(!vif) || !ath6kl_cfg80211_ready(vif)) return -EIO; diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c index e481f14b9878..fffb65b3e652 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c +++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c @@ -1085,9 +1085,7 @@ static int htc_setup_tx_complete(struct htc_target *target) send_pkt->completion = NULL; ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0); status = ath6kl_htc_tx_issue(target, send_pkt); - - if (send_pkt != NULL) - htc_reclaim_txctrl_buf(target, send_pkt); + 
htc_reclaim_txctrl_buf(target, send_pkt); return status; } diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h index 6314ae2e93e3..9d17a5375f64 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h @@ -610,8 +610,8 @@ #define AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ -127 #define AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ -116 -#define AR_PHY_CCA_NOM_VAL_9287_2GHZ -120 +#define AR_PHY_CCA_NOM_VAL_9287_2GHZ -112 #define AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ -127 -#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ -110 +#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ -97 #endif diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 79fd3b2dcbde..8b238c15916d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -857,7 +857,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah) qca956x_1p0_common_rx_gain_table); INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds, qca956x_1p0_common_rx_gain_bounds); - INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna, + INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna, qca956x_1p0_xlna_only); } else if (AR_SREV_9580(ah)) INIT_INI_ARRAY(&ah->iniModesRxGain, @@ -942,7 +942,7 @@ static void ar9003_rx_gain_table_mode2(struct ath_hw *ah) ar9462_2p1_baseband_core_mix_rxgain); INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble, ar9462_2p1_baseband_postamble_mix_rxgain); - INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna, + INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna, ar9462_2p1_baseband_postamble_5g_xlna); } else if (AR_SREV_9462_20(ah)) { INIT_INI_ARRAY(&ah->iniModesRxGain, @@ -951,7 +951,7 @@ static void ar9003_rx_gain_table_mode2(struct ath_hw *ah) ar9462_2p0_baseband_core_mix_rxgain); INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble, ar9462_2p0_baseband_postamble_mix_rxgain); - INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna, + INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna, ar9462_2p0_baseband_postamble_5g_xlna); } } @@ -961,12 +961,12 @@ static void ar9003_rx_gain_table_mode3(struct ath_hw *ah) if (AR_SREV_9462_21(ah)) { INIT_INI_ARRAY(&ah->iniModesRxGain, ar9462_2p1_common_5g_xlna_only_rxgain); - INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna, + INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna, ar9462_2p1_baseband_postamble_5g_xlna); } else if (AR_SREV_9462_20(ah)) { INIT_INI_ARRAY(&ah->iniModesRxGain, ar9462_2p0_common_5g_xlna_only_rxgain); - INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna, + INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna, ar9462_2p0_baseband_postamble_5g_xlna); } } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 1ad66b76749b..201425e7f9cb 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -926,19 +926,18 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, */ if ((ar9003_hw_get_rx_gain_idx(ah) == 2) || (ar9003_hw_get_rx_gain_idx(ah) == 3)) { - REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna, + REG_WRITE_ARRAY(&ah->ini_modes_rxgain_xlna, modesIndex, regWrites); } - - if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0)) - REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna, - modesIndex, regWrites); } if (AR_SREV_9550(ah) || AR_SREV_9561(ah)) REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex, regWrites); + if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0)) + REG_WRITE_ARRAY(&ah->ini_modes_rxgain_xlna, + modesIndex, regWrites); /* * TXGAIN initvals. 
*/ diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index e8454db17634..4f0a3f6b0c52 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -919,7 +919,7 @@ struct ath_hw { struct ar5416IniArray iniCckfirJapan2484; struct ar5416IniArray iniModes_9271_ANI_reg; struct ar5416IniArray ini_radio_post_sys2ant; - struct ar5416IniArray ini_modes_rxgain_5g_xlna; + struct ar5416IniArray ini_modes_rxgain_xlna; struct ar5416IniArray ini_modes_rxgain_bb_core; struct ar5416IniArray ini_modes_rxgain_bb_postamble; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 5d532c7b813f..2e2b92ba96b8 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -881,6 +881,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->max_rate_tries = 10; hw->sta_data_size = sizeof(struct ath_node); hw->vif_data_size = sizeof(struct ath_vif); + hw->extra_tx_headroom = 4; hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1; hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1; diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 9d687121b2bf..2303ef96299d 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -284,10 +284,10 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event) if (cd == NULL) return false; - dpd->last_pulse_ts = event->ts; /* reset detector on time stamp wraparound, caused by TSF reset */ if (event->ts < dpd->last_pulse_ts) dpd_reset(dpd); + dpd->last_pulse_ts = event->ts; /* do type individual pattern matching */ for (i = 0; i < dpd->num_radar_types; i++) { diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index ce8c0381825e..6dfedc8bd6a3 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -1,5 +1,6 @@ config WIL6210 tristate "Wilocity 60g WiFi card wil6210 support" + select WANT_DEV_COREDUMP depends on CFG80211 depends on PCI default n diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index 64b432625fbb..fdf63d5fe82b 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -17,6 +17,7 @@ wil6210-y += pmc.o wil6210-$(CONFIG_WIL6210_TRACING) += trace.o wil6210-y += wil_platform.o wil6210-y += ethtool.o +wil6210-y += wil_crash_dump.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index d1a1e160ef31..97bc186f9728 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1373,6 +1373,12 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) } } spin_unlock_bh(&p->tid_rx_lock); + seq_printf(s, + "Rx invalid frame: non-data %lu, short %lu, large %lu\n", + p->stats.rx_non_data_frame, + p->stats.rx_short_frame, + p->stats.rx_large_frame); + seq_puts(s, "Rx/MCS:"); for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs); mcs++) diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index a371f036d054..06fc46f85c85 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -1,5 +1,5 @@ /* - 
* Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -347,7 +347,12 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) wil6210_mask_irq_misc(wil); if (isr & ISR_MISC_FW_ERROR) { - wil_err(wil, "Firmware error detected\n"); + u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE); + u32 ucode_assert_code = wil_r(wil, RGF_UCODE_ASSERT_CODE); + + wil_err(wil, + "Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n", + fw_assert_code, ucode_assert_code); clear_bit(wil_status_fwready, wil->status); /* * do not clear @isr here - we do 2-nd part in thread @@ -386,6 +391,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); if (isr & ISR_MISC_FW_ERROR) { + wil_fw_core_dump(wil); wil_notify_fw_error(wil); isr &= ~ISR_MISC_FW_ERROR; wil_fw_error_recovery(wil); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 2fb04c51da53..aade16b126c4 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -203,11 +203,13 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, * - disconnect single STA, already disconnected * - disconnect all * - * For "disconnect all", there are 2 options: + * For "disconnect all", there are 3 options: * - bssid == NULL + * - bssid is broadcast address (ff:ff:ff:ff:ff:ff) * - bssid is our MAC address */ - if (bssid && memcmp(ndev->dev_addr, bssid, ETH_ALEN)) { + if (bssid && !is_broadcast_ether_addr(bssid) && + !ether_addr_equal_unaligned(ndev->dev_addr, bssid)) { cid = wil_find_cid(wil, bssid); wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n", bssid, cid, reason_code); @@ -765,6 +767,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (wil->hw_version == HW_VER_UNKNOWN) return -ENODEV; + set_bit(wil_status_resetting, wil->status); + cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); wil_bcast_fini(wil); @@ -851,6 +855,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) void wil_fw_error_recovery(struct wil6210_priv *wil) { wil_dbg_misc(wil, "starting fw error recovery\n"); + + if (test_bit(wil_status_resetting, wil->status)) { + wil_info(wil, "Reset already in progress\n"); + return; + } + wil->recovery_state = fw_recovery_pending; schedule_work(&wil->fw_error_worker); } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index feff1ef10fb3..1a3142c332e1 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -260,6 +260,7 @@ static const struct pci_device_id wil6210_pcie_ids[] = { MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids); #ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int wil6210_suspend(struct device *dev, bool is_runtime) { @@ -307,7 +308,6 @@ static int wil6210_resume(struct device *dev, bool is_runtime) return rc; } -#ifdef CONFIG_PM_SLEEP static int wil6210_pm_suspend(struct device *dev) { return wil6210_suspend(dev, false); diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c index 8a8cdc61b25b..5ca0307a3274 100644 --- a/drivers/net/wireless/ath/wil6210/pmc.c +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -110,7 +110,7 @@ void wil_pmc_alloc(struct 
wil6210_priv *wil, */ for (i = 0; i < num_descriptors; i++) { struct vring_tx_desc *_d = &pmc->pring_va[i]; - struct vring_tx_desc dd, *d = &dd; + struct vring_tx_desc dd = {}, *d = &dd; int j = 0; pmc->descriptors[i].va = dma_alloc_coherent(dev, diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 9238c1ac23dd..e3d1be82f314 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -205,6 +205,32 @@ out: spin_unlock(&sta->tid_rx_lock); } +/* process BAR frame, called in NAPI context */ +void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq) +{ + struct wil_sta_info *sta = &wil->sta[cid]; + struct wil_tid_ampdu_rx *r; + + spin_lock(&sta->tid_rx_lock); + + r = sta->tid_rx[tid]; + if (!r) { + wil_err(wil, "BAR for non-existing CID %d TID %d\n", cid, tid); + goto out; + } + if (seq_less(seq, r->head_seq_num)) { + wil_err(wil, "BAR Seq 0x%03x preceding head 0x%03x\n", + seq, r->head_seq_num); + goto out; + } + wil_dbg_txrx(wil, "BAR: CID %d TID %d Seq 0x%03x head 0x%03x\n", + cid, tid, seq, r->head_seq_num); + wil_release_reorder_frames(wil, r, seq); + +out: + spin_unlock(&sta->tid_rx_lock); +} + struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, int size, u16 ssn) { diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 6229110d558a..0f8b6877497e 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -358,6 +358,13 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil, } } +/* similar to ieee80211_ version, but FC contain only 1-st byte */ +static inline int wil_is_back_req(u8 fc) +{ + return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); +} + /** * reap 1 frame from @swhead * @@ -379,14 +386,16 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, u16 dmalen; u8 ftype; int cid; - int i = (int)vring->swhead; + int i; struct wil_net_stats *stats; BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); +again: if (unlikely(wil_vring_is_empty(vring))) return NULL; + i = (int)vring->swhead; _d = &vring->va[i].rx; if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) { /* it is not error, we just reached end of Rx done area */ @@ -398,7 +407,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, wil_vring_advance_head(vring, 1); if (!skb) { wil_err(wil, "No Rx skb at [%d]\n", i); - return NULL; + goto again; } d = wil_skb_rxdesc(skb); *d = *_d; @@ -409,13 +418,17 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, trace_wil6210_rx(i, d); wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen); - wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, + wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); + cid = wil_rxdesc_cid(d); + stats = &wil->sta[cid].stats; + if (unlikely(dmalen > sz)) { wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); + stats->rx_large_frame++; kfree_skb(skb); - return NULL; + goto again; } skb_trim(skb, dmalen); @@ -424,8 +437,6 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb_headlen(skb), false); - cid = wil_rxdesc_cid(d); - stats = &wil->sta[cid].stats; stats->last_mcs_rx = wil_rxdesc_mcs(d); if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs)) stats->rx_per_mcs[stats->last_mcs_rx]++; @@
-437,24 +448,47 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, /* no extra checks if in sniffer mode */ if (ndev->type != ARPHRD_ETHER) return skb; - /* - * Non-data frames may be delivered through Rx DMA channel (ex: BAR) + /* Non-data frames may be delivered through Rx DMA channel (ex: BAR) * Driver should recognize it by frame type, that is found * in Rx descriptor. If type is not data, it is 802.11 frame as is */ ftype = wil_rxdesc_ftype(d) << 2; if (unlikely(ftype != IEEE80211_FTYPE_DATA)) { - wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype); - /* TODO: process it */ + u8 fc1 = wil_rxdesc_fc1(d); + int mid = wil_rxdesc_mid(d); + int tid = wil_rxdesc_tid(d); + u16 seq = wil_rxdesc_seq(d); + + wil_dbg_txrx(wil, + "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n", + fc1, mid, cid, tid, seq); + stats->rx_non_data_frame++; + if (wil_is_back_req(fc1)) { + wil_dbg_txrx(wil, + "BAR: MID %d CID %d TID %d Seq 0x%03x\n", + mid, cid, tid, seq); + wil_rx_bar(wil, cid, tid, seq); + } else { + /* print again all info. One can enable only this + * without overhead for printing every Rx frame + */ + wil_dbg_txrx(wil, + "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n", + fc1, mid, cid, tid, seq); + wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4, + (const void *)d, sizeof(*d), false); + wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb_headlen(skb), false); + } kfree_skb(skb); - return NULL; + goto again; } if (unlikely(skb->len < ETH_HLEN + snaplen)) { wil_err(wil, "Short frame, len = %d\n", skb->len); - /* TODO: process it (i.e. BAR) */ + stats->rx_short_frame++; kfree_skb(skb); - return NULL; + goto again; } /* L4 IDENT is on when HW calculated checksum, check status @@ -1633,7 +1667,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) goto drop; } if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) { - wil_err(wil, "FW not connected\n"); + wil_err_ratelimited(wil, "FW not connected\n"); goto drop; } if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) { diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 82a8f9a030e7..ee7c7b4b9a17 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -464,6 +464,12 @@ static inline int wil_rxdesc_subtype(struct vring_rx_desc *d) return WIL_GET_BITS(d->mac.d0, 12, 15); } +/* 1-st byte (with frame type/subtype) of FC field */ +static inline u8 wil_rxdesc_fc1(struct vring_rx_desc *d) +{ + return (u8)(WIL_GET_BITS(d->mac.d0, 10, 15) << 2); +} + static inline int wil_rxdesc_seq(struct vring_rx_desc *d) { return WIL_GET_BITS(d->mac.d0, 16, 27); @@ -501,6 +507,7 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb) void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev); void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb); +void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq); struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, int size, u16 ssn); void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index dd4ea926b8e3..f619bf234353 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -246,6 +246,10 @@ struct RGF_ICR { #define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */ #define JTAG_DEV_ID_SPARROW_B0 
(0x2632072f) +/* crash codes for FW/Ucode stored here */ +#define RGF_FW_ASSERT_CODE (0x91f020) +#define RGF_UCODE_ASSERT_CODE (0x91f028) + enum { HW_VER_UNKNOWN, HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */ @@ -405,6 +409,7 @@ enum { /* for wil6210_priv.status */ wil_status_reset_done, wil_status_irqen, /* FIXME: interrupts enabled - for debug */ wil_status_napi_en, /* NAPI enabled protected by wil->mutex */ + wil_status_resetting, /* reset in progress */ wil_status_last /* keep last */ }; @@ -465,6 +470,9 @@ struct wil_net_stats { unsigned long tx_bytes; unsigned long tx_errors; unsigned long rx_dropped; + unsigned long rx_non_data_frame; + unsigned long rx_short_frame; + unsigned long rx_large_frame; u16 last_mcs_rx; u64 rx_per_mcs[WIL_MCS_MAX + 1]; }; @@ -820,4 +828,6 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime); int wil_suspend(struct wil6210_priv *wil, bool is_runtime); int wil_resume(struct wil6210_priv *wil, bool is_runtime); +void wil_fw_core_dump(struct wil6210_priv *wil); + #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c new file mode 100644 index 000000000000..7e70934990ae --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2015 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wil6210.h" +#include <linux/devcoredump.h> + +static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil, + u32 *out_dump_size, u32 *out_host_min) +{ + int i; + const struct fw_map *map; + u32 host_min, host_max, tmp_max; + + if (!out_dump_size) + return -EINVAL; + + /* calculate the total size of the unpacked crash dump */ + BUILD_BUG_ON(ARRAY_SIZE(fw_mapping) == 0); + map = &fw_mapping[0]; + host_min = map->host; + host_max = map->host + (map->to - map->from); + + for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) { + map = &fw_mapping[i]; + + if (map->host < host_min) + host_min = map->host; + + tmp_max = map->host + (map->to - map->from); + if (tmp_max > host_max) + host_max = tmp_max; + } + + *out_dump_size = host_max - host_min; + if (out_host_min) + *out_host_min = host_min; + + return 0; +} + +static int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, + u32 size) +{ + int i; + const struct fw_map *map; + void *data; + u32 host_min, dump_size, offset, len; + + if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) { + wil_err(wil, "%s: fail to obtain crash dump size\n", __func__); + return -EINVAL; + } + + if (dump_size > size) { + wil_err(wil, "%s: not enough space for dump. 
Need %d have %d\n", + __func__, dump_size, size); + return -EINVAL; + } + + /* copy to crash dump area */ + for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { + map = &fw_mapping[i]; + + data = (void * __force)wil->csr + HOSTADDR(map->host); + len = map->to - map->from; + offset = map->host - host_min; + + wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n", + __func__, fw_mapping[i].name, len, offset); + + wil_memcpy_fromio_32((void * __force)(dest + offset), + (const void __iomem * __force)data, len); + } + + return 0; +} + +void wil_fw_core_dump(struct wil6210_priv *wil) +{ + void *fw_dump_data; + u32 fw_dump_size; + + if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) { + wil_err(wil, "%s: fail to get fw dump size\n", __func__); + return; + } + + fw_dump_data = vzalloc(fw_dump_size); + if (!fw_dump_data) + return; + + if (wil_fw_copy_crash_dump(wil, fw_dump_data, fw_dump_size)) { + vfree(fw_dump_data); + return; + } + /* fw_dump_data will be free in device coredump release function + * after 5 min + */ + dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL); + wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__, + fw_dump_size); +} diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 2f35d4c51f34..61121892c6ca 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1120,7 +1120,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP); cmd.sniffer_cfg.phy_support = cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL) - ? WMI_SNIFFER_CP : WMI_SNIFFER_DP); + ? WMI_SNIFFER_CP : WMI_SNIFFER_BOTH_PHYS); } else { /* Initialize offload (in non-sniffer mode). * Linux IP stack always calculates IP checksum diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 28490702124a..71d3e9adbf3c 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -120,6 +120,7 @@ MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if over #ifdef CONFIG_B43_BCMA static const struct bcma_device_id b43_bcma_tbl[] = { BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS), + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x15, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1C, BCMA_ANY_CLASS), diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index fe3dc126b149..ab42b1fea03c 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig @@ -82,5 +82,6 @@ config BRCM_TRACING config BRCMDBG bool "Broadcom driver debug functions" depends on BRCMSMAC || BRCMFMAC + select WANT_DEV_COREDUMP ---help--- Selecting this enables additional code for debug purposes. diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/brcm80211/brcmfmac/bus.h index 89e6a4dc105e..230cad788ace 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/bus.h @@ -65,6 +65,8 @@ struct brcmf_bus_dcmd { * @rxctl: receive a control response message from dongle. * @gettxq: obtain a reference of bus transmit queue (optional). * @wowl_config: specify if dongle is configured for wowl when going to suspend + * @get_ramsize: obtain size of device memory. 
+ * @get_memdump: obtain device memory dump in provided buffer. * * This structure provides an abstract interface towards the * bus specific driver. For control messages to common driver @@ -79,6 +81,8 @@ struct brcmf_bus_ops { int (*rxctl)(struct device *dev, unsigned char *msg, uint len); struct pktq * (*gettxq)(struct device *dev); void (*wowl_config)(struct device *dev, bool enabled); + size_t (*get_ramsize)(struct device *dev); + int (*get_memdump)(struct device *dev, void *data, size_t len); }; @@ -185,6 +189,23 @@ void brcmf_bus_wowl_config(struct brcmf_bus *bus, bool enabled) bus->ops->wowl_config(bus->dev, enabled); } +static inline size_t brcmf_bus_get_ramsize(struct brcmf_bus *bus) +{ + if (!bus->ops->get_ramsize) + return 0; + + return bus->ops->get_ramsize(bus->dev); +} + +static inline +int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len) +{ + if (!bus->ops->get_memdump) + return -EOPNOTSUPP; + + return bus->ops->get_memdump(bus->dev, data, len); +} + /* * interface functions from common layer */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index 891f4ed8c5e3..deb5f78dcacc 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -840,7 +840,6 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO); } if (!err) { - set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state); brcmf_dbg(INFO, "IF Type = AP\n"); } } else { @@ -2432,6 +2431,9 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_sta_info_le sta_info_le; u32 sta_flags; u32 is_tdls_peer; + s32 total_rssi; + s32 count_rssi; + u32 i; brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); if (!check_vif_up(ifp->vif)) @@ -2478,13 +2480,13 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts); if (sinfo->tx_packets) { sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); - sinfo->txrate.legacy = le32_to_cpu(sta_info_le.tx_rate); - sinfo->txrate.legacy /= 100; + sinfo->txrate.legacy = + le32_to_cpu(sta_info_le.tx_rate) / 100; } if (sinfo->rx_packets) { sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE); - sinfo->rxrate.legacy = le32_to_cpu(sta_info_le.rx_rate); - sinfo->rxrate.legacy /= 100; + sinfo->rxrate.legacy = + le32_to_cpu(sta_info_le.rx_rate) / 100; } if (le16_to_cpu(sta_info_le.ver) >= 4) { sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES); @@ -2492,12 +2494,61 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES); sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes); } + total_rssi = 0; + count_rssi = 0; + for (i = 0; i < BRCMF_ANT_MAX; i++) { + if (sta_info_le.rssi[i]) { + sinfo->chain_signal_avg[count_rssi] = + sta_info_le.rssi[i]; + sinfo->chain_signal[count_rssi] = + sta_info_le.rssi[i]; + total_rssi += sta_info_le.rssi[i]; + count_rssi++; + } + } + if (count_rssi) { + sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL); + sinfo->chains = count_rssi; + + sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + total_rssi /= count_rssi; + sinfo->signal = total_rssi; + } } done: brcmf_dbg(TRACE, "Exit\n"); return err; } +static int +brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev, + int idx, u8 *mac, struct station_info *sinfo) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct 
brcmf_if *ifp = netdev_priv(ndev); + s32 err; + + brcmf_dbg(TRACE, "Enter, idx %d\n", idx); + + if (idx == 0) { + cfg->assoclist.count = cpu_to_le32(BRCMF_MAX_ASSOCLIST); + err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_ASSOCLIST, + &cfg->assoclist, + sizeof(cfg->assoclist)); + if (err) { + brcmf_err("BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n", + err); + cfg->assoclist.count = 0; + return -EOPNOTSUPP; + } + } + if (idx < le32_to_cpu(cfg->assoclist.count)) { + memcpy(mac, cfg->assoclist.mac[idx], ETH_ALEN); + return brcmf_cfg80211_get_station(wiphy, ndev, mac, sinfo); + } + return -ENOENT; +} + static s32 brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev, bool enabled, s32 timeout) @@ -4199,8 +4250,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_dbg(TRACE, "GO mode configuration complete\n"); } - clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); + brcmf_net_setcarrier(ifp, true); exit: if ((err) && (!mbss)) { @@ -4264,8 +4315,8 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) } brcmf_set_mpc(ifp, 1); brcmf_configure_arp_offload(ifp, true); - set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); + brcmf_net_setcarrier(ifp, false); return err; } @@ -4597,6 +4648,7 @@ static struct cfg80211_ops wl_cfg80211_ops = { .join_ibss = brcmf_cfg80211_join_ibss, .leave_ibss = brcmf_cfg80211_leave_ibss, .get_station = brcmf_cfg80211_get_station, + .dump_station = brcmf_cfg80211_dump_station, .set_tx_power = brcmf_cfg80211_set_tx_power, .get_tx_power = brcmf_cfg80211_get_tx_power, .add_key = brcmf_cfg80211_add_key, @@ -4974,6 +5026,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp, &ifp->vif->sme_state); } else brcmf_bss_connect_done(cfg, ndev, e, true); + brcmf_net_setcarrier(ifp, true); } else if (brcmf_is_linkdown(e)) { brcmf_dbg(CONN, "Linkdown\n"); if (!brcmf_is_ibssmode(ifp->vif)) { @@ -4983,6 +5036,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp, brcmf_init_prof(ndev_to_prof(ndev)); if (ndev != cfg_to_ndev(cfg)) complete(&cfg->vif_disabled); + brcmf_net_setcarrier(ifp, false); } else if (brcmf_is_nonetwork(cfg, e)) { if (brcmf_is_ibssmode(ifp->vif)) clear_bit(BRCMF_VIF_STATUS_CONNECTING, @@ -6238,6 +6292,17 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, else *cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; } + /* p2p might require that "if-events" get processed by fweh. So + * activate the already registered event handlers now and activate + * the rest when initialization has completed. drvr->config needs to + * be assigned before activating events. 
+ */ + drvr->config = cfg; + err = brcmf_fweh_activate_events(ifp); + if (err) { + brcmf_err("FWEH activation failed (%d)\n", err); + goto wiphy_unreg_out; + } err = brcmf_p2p_attach(cfg, p2pdev_forced); if (err) { @@ -6260,6 +6325,13 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, brcmf_notify_tdls_peer_event); } + /* (re-) activate FWEH event handling */ + err = brcmf_fweh_activate_events(ifp); + if (err) { + brcmf_err("FWEH activation failed (%d)\n", err); + goto wiphy_unreg_out; + } + return cfg; wiphy_unreg_out: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h index 3f5e5505d329..6a878c8f883f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h @@ -143,7 +143,6 @@ struct brcmf_cfg80211_profile { * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress. * @BRCMF_VIF_STATUS_CONNECTED: connected/joined succesfully. * @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress. - * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation. * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started. */ enum brcmf_vif_status { @@ -151,7 +150,6 @@ enum brcmf_vif_status { BRCMF_VIF_STATUS_CONNECTING, BRCMF_VIF_STATUS_CONNECTED, BRCMF_VIF_STATUS_DISCONNECTING, - BRCMF_VIF_STATUS_AP_CREATING, BRCMF_VIF_STATUS_AP_CREATED }; @@ -407,6 +405,7 @@ struct brcmf_cfg80211_info { struct brcmu_d11inf d11inf; bool wowl_enabled; u32 pre_wowl_pmmode; + struct brcmf_assoclist_le assoclist; }; /** diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c index ffc3ace24903..f04833db2fd0 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c @@ -682,6 +682,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) case BRCM_CC_43570_CHIP_ID: case BRCM_CC_4358_CHIP_ID: case BRCM_CC_43602_CHIP_ID: + case BRCM_CC_4371_CHIP_ID: return 0x180000; case BRCM_CC_4365_CHIP_ID: case BRCM_CC_4366_CHIP_ID: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.h b/drivers/net/wireless/brcm80211/brcmfmac/common.h index 0d39d80cee28..21c7488b4732 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/common.h @@ -17,4 +17,7 @@ extern const u8 ALLFFMAC[ETH_ALEN]; +/* Sets dongle media info (drv_version, mac address). */ +int brcmf_c_preinit_dcmds(struct brcmf_if *ifp); + #endif /* BRCMFMAC_COMMON_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c index 8c2a280f0c98..b5ab98ee1445 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c @@ -33,6 +33,7 @@ #include "feature.h" #include "proto.h" #include "pcie.h" +#include "common.h" MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); @@ -634,8 +635,7 @@ static int brcmf_netdev_stop(struct net_device *ndev) brcmf_cfg80211_down(ndev); - /* Set state and stop OS transmissions */ - netif_stop_queue(ndev); + brcmf_net_setcarrier(ifp, false); return 0; } @@ -669,8 +669,8 @@ static int brcmf_netdev_open(struct net_device *ndev) return -EIO; } - /* Allow transmit calls */ - netif_start_queue(ndev); + /* Clear, carrier, set when connected or AP mode. 
*/ + netif_carrier_off(ndev); return 0; } @@ -735,6 +735,24 @@ static void brcmf_net_detach(struct net_device *ndev) brcmf_cfg80211_free_netdev(ndev); } +void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on) +{ + struct net_device *ndev; + + brcmf_dbg(TRACE, "Enter, idx=%d carrier=%d\n", ifp->bssidx, on); + + ndev = ifp->ndev; + brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_DISCONNECTED, !on); + if (on) { + if (!netif_carrier_ok(ndev)) + netif_carrier_on(ndev); + + } else { + if (netif_carrier_ok(ndev)) + netif_carrier_off(ndev); + } +} + static int brcmf_net_p2p_open(struct net_device *ndev) { brcmf_dbg(TRACE, "Enter\n"); @@ -828,8 +846,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, } else { brcmf_dbg(INFO, "allocate netdev interface\n"); /* Allocate netdev, including space for private structure */ - ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN, - ether_setup); + ndev = alloc_netdev(sizeof(*ifp), is_p2pdev ? "p2p%d" : name, + NET_NAME_UNKNOWN, ether_setup); if (!ndev) return ERR_PTR(-ENOMEM); @@ -957,8 +975,8 @@ int brcmf_attach(struct device *dev) drvr->bus_if = dev_get_drvdata(dev); drvr->bus_if->drvr = drvr; - /* create device debugfs folder */ - brcmf_debugfs_attach(drvr); + /* attach debug facilities */ + brcmf_debug_attach(drvr); /* Attach and link in the protocol */ ret = brcmf_proto_attach(drvr); @@ -1021,12 +1039,7 @@ int brcmf_bus_start(struct device *dev) if (IS_ERR(ifp)) return PTR_ERR(ifp); - if (brcmf_p2p_enable) - p2p_ifp = brcmf_add_if(drvr, 1, 0, false, "p2p%d", NULL); - else - p2p_ifp = NULL; - if (IS_ERR(p2p_ifp)) - p2p_ifp = NULL; + p2p_ifp = NULL; /* signal bus ready */ brcmf_bus_change_state(bus_if, BRCMF_BUS_UP); @@ -1060,11 +1073,13 @@ int brcmf_bus_start(struct device *dev) goto fail; } - ret = brcmf_fweh_activate_events(ifp); - if (ret < 0) - goto fail; - ret = brcmf_net_attach(ifp, false); + + if ((!ret) && (brcmf_p2p_enable)) { + p2p_ifp = drvr->iflist[1]; + if (p2p_ifp) + ret = brcmf_net_p2p_attach(p2p_ifp); + } fail: if (ret < 0) { brcmf_err("failed: %d\n", ret); @@ -1076,20 +1091,12 @@ fail: brcmf_fws_del_interface(ifp); brcmf_fws_deinit(drvr); } - if (drvr->iflist[0]) { + if (ifp) brcmf_net_detach(ifp->ndev); - drvr->iflist[0] = NULL; - } - if (p2p_ifp) { + if (p2p_ifp) brcmf_net_detach(p2p_ifp->ndev); - drvr->iflist[1] = NULL; - } return ret; } - if ((brcmf_p2p_enable) && (p2p_ifp)) - if (brcmf_net_p2p_attach(p2p_ifp) < 0) - brcmf_p2p_enable = 0; - return 0; } @@ -1155,7 +1162,7 @@ void brcmf_detach(struct device *dev) brcmf_proto_detach(drvr); - brcmf_debugfs_detach(drvr); + brcmf_debug_detach(drvr); bus_if->drvr = NULL; kfree(drvr); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h index d81ff95acab5..2f9101b2ad34 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h @@ -154,10 +154,13 @@ struct brcmf_fws_mac_descriptor; * netif stopped due to firmware signalling flow control. * @BRCMF_NETIF_STOP_REASON_FLOW: * netif stopped due to flowring full. + * @BRCMF_NETIF_STOP_REASON_DISCONNECTED: + * netif stopped due to not being connected (STA mode). 
*/ enum brcmf_netif_stop_reason { - BRCMF_NETIF_STOP_REASON_FWS_FC = 1, - BRCMF_NETIF_STOP_REASON_FLOW = 2 + BRCMF_NETIF_STOP_REASON_FWS_FC = BIT(0), + BRCMF_NETIF_STOP_REASON_FLOW = BIT(1), + BRCMF_NETIF_STOP_REASON_DISCONNECTED = BIT(2) }; /** @@ -213,8 +216,6 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, enum brcmf_netif_stop_reason reason, bool state); void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); - -/* Sets dongle media info (drv_version, mac address). */ -int brcmf_c_preinit_dcmds(struct brcmf_if *ifp); +void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on); #endif /* BRCMFMAC_CORE_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/brcm80211/brcmfmac/debug.c index 2d6d00553858..1299dccc78b4 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.c @@ -16,15 +16,45 @@ #include <linux/debugfs.h> #include <linux/netdevice.h> #include <linux/module.h> +#include <linux/devcoredump.h> #include <brcmu_wifi.h> #include <brcmu_utils.h> #include "core.h" #include "bus.h" +#include "fweh.h" #include "debug.h" static struct dentry *root_folder; +static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, + size_t len) +{ + void *dump; + size_t ramsize; + + ramsize = brcmf_bus_get_ramsize(bus); + if (ramsize) { + dump = vzalloc(len + ramsize); + if (!dump) + return -ENOMEM; + memcpy(dump, data, len); + brcmf_bus_get_memdump(bus, dump + len, ramsize); + dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL); + } + return 0; +} + +static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp, + const struct brcmf_event_msg *evtmsg, + void *data) +{ + brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx); + + return brcmf_debug_create_memdump(ifp->drvr->bus_if, data, + evtmsg->datalen); +} + void brcmf_debugfs_init(void) { root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL); @@ -41,7 +71,7 @@ void brcmf_debugfs_exit(void) root_folder = NULL; } -int brcmf_debugfs_attach(struct brcmf_pub *drvr) +int brcmf_debug_attach(struct brcmf_pub *drvr) { struct device *dev = drvr->bus_if->dev; @@ -49,12 +79,18 @@ int brcmf_debugfs_attach(struct brcmf_pub *drvr) return -ENODEV; drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder); + if (IS_ERR(drvr->dbgfs_dir)) + return PTR_ERR(drvr->dbgfs_dir); - return PTR_ERR_OR_ZERO(drvr->dbgfs_dir); + + return brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG, + brcmf_debug_psm_watchdog_notify); } -void brcmf_debugfs_detach(struct brcmf_pub *drvr) +void brcmf_debug_detach(struct brcmf_pub *drvr) { + brcmf_fweh_unregister(drvr, BRCMF_E_PSM_WATCHDOG); + if (!IS_ERR_OR_NULL(drvr->dbgfs_dir)) debugfs_remove_recursive(drvr->dbgfs_dir); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/brcm80211/brcmfmac/debug.h index 48648ca44ba8..d0d9676f7f9d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.h @@ -109,8 +109,8 @@ struct brcmf_pub; #ifdef DEBUG void brcmf_debugfs_init(void); void brcmf_debugfs_exit(void); -int brcmf_debugfs_attach(struct brcmf_pub *drvr); -void brcmf_debugfs_detach(struct brcmf_pub *drvr); +int brcmf_debug_attach(struct brcmf_pub *drvr); +void brcmf_debug_detach(struct brcmf_pub *drvr); struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr); int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn, int (*read_fn)(struct 
seq_file *seq, void *data)); @@ -121,11 +121,11 @@ static inline void brcmf_debugfs_init(void) static inline void brcmf_debugfs_exit(void) { } -static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr) +static inline int brcmf_debug_attach(struct brcmf_pub *drvr) { return 0; } -static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr) +static inline void brcmf_debug_detach(struct brcmf_pub *drvr) { } static inline diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c index 971920f77b68..4248f3c80e78 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c @@ -29,7 +29,7 @@ #define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */ char brcmf_firmware_path[BRCMF_FW_PATH_LEN]; -module_param_string(firmware_path, brcmf_firmware_path, +module_param_string(alternative_fw_path, brcmf_firmware_path, BRCMF_FW_PATH_LEN, 0440); enum nvram_parser_state { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index 383d6faf426b..3878b6f6cfce 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c @@ -213,7 +213,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, is_p2pdev, emsg->ifname, emsg->addr); if (IS_ERR(ifp)) return; - brcmf_fws_add_interface(ifp); + if (!is_p2pdev) + brcmf_fws_add_interface(ifp); if (!drvr->fweh.evt_handler[BRCMF_E_IF]) if (brcmf_net_attach(ifp, false) < 0) return; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h index 5434dcf64f7d..b20fc0f82a48 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h @@ -72,6 +72,7 @@ #define BRCMF_C_GET_BSS_INFO 136 #define BRCMF_C_GET_BANDLIST 140 #define BRCMF_C_SET_SCB_TIMEOUT 158 +#define BRCMF_C_GET_ASSOCLIST 159 #define BRCMF_C_GET_PHYLIST 180 #define BRCMF_C_SET_SCAN_CHANNEL_TIME 185 #define BRCMF_C_SET_SCAN_UNASSOC_TIME 187 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h index 297911f38fa0..daa427b46712 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h @@ -119,6 +119,8 @@ #define BRCMF_COUNTRY_BUF_SZ 4 #define BRCMF_ANT_MAX 4 +#define BRCMF_MAX_ASSOCLIST 128 + /* join preference types for join_pref iovar */ enum brcmf_join_pref_types { BRCMF_JOIN_PREF_RSSI = 1, @@ -621,4 +623,15 @@ struct brcmf_rev_info_le { __le32 nvramrev; }; +/** + * struct brcmf_assoclist_le - request assoc list. + * + * @count: indicates number of stations. + * @mac: MAC addresses of stations. + */ +struct brcmf_assoclist_le { + __le32 count; + u8 mac[BRCMF_MAX_ASSOCLIST][ETH_ALEN]; +}; + #endif /* FWIL_TYPES_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 7eff9de6885b..44e618f9d890 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c @@ -873,9 +873,6 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf) commonring = msgbuf->flowrings[flowid]; atomic_dec(&commonring->outstanding_tx); - /* Hante: i believe this was a bug as tx_status->msg.ifidx was used - * in brcmf_txfinalize as index in drvr->iflist. Can you confirm/deny? 
- */ brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx), skb, true); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index 37a8c352e077..d224b3dd72ed 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -2353,83 +2353,30 @@ void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev) * brcmf_p2p_attach() - attach for P2P. * * @cfg: driver private data for cfg80211 interface. + * @p2pdev_forced: create p2p device interface at attach. */ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced) { - struct brcmf_if *pri_ifp; - struct brcmf_if *p2p_ifp; - struct brcmf_cfg80211_vif *p2p_vif; struct brcmf_p2p_info *p2p; - struct brcmf_pub *drvr; - s32 bssidx; + struct brcmf_if *pri_ifp; s32 err = 0; + void *err_ptr; p2p = &cfg->p2p; p2p->cfg = cfg; - drvr = cfg->pub; - - pri_ifp = brcmf_get_ifp(drvr, 0); + pri_ifp = brcmf_get_ifp(cfg->pub, 0); p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif; if (p2pdev_forced) { - p2p_ifp = drvr->iflist[1]; + err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL); + if (IS_ERR(err_ptr)) { + brcmf_err("P2P device creation failed.\n"); + err = PTR_ERR(err_ptr); + } } else { - p2p_ifp = NULL; p2p->p2pdev_dynamically = true; } - if (p2p_ifp) { - p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE, - false); - if (IS_ERR(p2p_vif)) { - brcmf_err("could not create discovery vif\n"); - err = -ENOMEM; - goto exit; - } - - p2p_vif->ifp = p2p_ifp; - p2p_ifp->vif = p2p_vif; - p2p_vif->wdev.netdev = p2p_ifp->ndev; - p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev; - SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy)); - - p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif; - - brcmf_p2p_generate_bss_mac(p2p, NULL); - memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN); - brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr); - - brcmf_fweh_p2pdev_setup(pri_ifp, true); - - /* Initialize P2P Discovery in the firmware */ - err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1); - if (err < 0) { - brcmf_err("set p2p_disc error\n"); - brcmf_free_vif(p2p_vif); - goto exit; - } - /* obtain bsscfg index for P2P discovery */ - err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx); - if (err < 0) { - brcmf_err("retrieving discover bsscfg index failed\n"); - brcmf_free_vif(p2p_vif); - goto exit; - } - /* Verify that firmware uses same bssidx as driver !! 
*/ - if (p2p_ifp->bssidx != bssidx) { - brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n", - bssidx, p2p_ifp->bssidx); - brcmf_free_vif(p2p_vif); - goto exit; - } - - init_completion(&p2p->send_af_done); - INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler); - init_completion(&p2p->afx_hdl.act_frm_scan); - init_completion(&p2p->wait_next_af); -exit: - brcmf_fweh_p2pdev_setup(pri_ifp, false); - } return err; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c index 30baf352e234..83d804221715 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c @@ -59,6 +59,8 @@ enum brcmf_pcie_state { #define BRCMF_PCIE_4365_NVRAM_NAME "brcm/brcmfmac4365b-pcie.txt" #define BRCMF_PCIE_4366_FW_NAME "brcm/brcmfmac4366b-pcie.bin" #define BRCMF_PCIE_4366_NVRAM_NAME "brcm/brcmfmac4366b-pcie.txt" +#define BRCMF_PCIE_4371_FW_NAME "brcm/brcmfmac4371-pcie.bin" +#define BRCMF_PCIE_4371_NVRAM_NAME "brcm/brcmfmac4371-pcie.txt" #define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */ @@ -212,6 +214,8 @@ MODULE_FIRMWARE(BRCMF_PCIE_4365_FW_NAME); MODULE_FIRMWARE(BRCMF_PCIE_4365_NVRAM_NAME); MODULE_FIRMWARE(BRCMF_PCIE_4366_FW_NAME); MODULE_FIRMWARE(BRCMF_PCIE_4366_NVRAM_NAME); +MODULE_FIRMWARE(BRCMF_PCIE_4371_FW_NAME); +MODULE_FIRMWARE(BRCMF_PCIE_4371_NVRAM_NAME); struct brcmf_pcie_console { @@ -448,6 +452,47 @@ brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset, } +static void +brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset, + void *dstaddr, u32 len) +{ + void __iomem *address = devinfo->tcm + mem_offset; + __le32 *dst32; + __le16 *dst16; + u8 *dst8; + + if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) { + if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) { + dst8 = (u8 *)dstaddr; + while (len) { + *dst8 = ioread8(address); + address++; + dst8++; + len--; + } + } else { + len = len / 2; + dst16 = (__le16 *)dstaddr; + while (len) { + *dst16 = cpu_to_le16(ioread16(address)); + address += 2; + dst16++; + len--; + } + } + } else { + len = len / 4; + dst32 = (__le32 *)dstaddr; + while (len) { + *dst32 = cpu_to_le32(ioread32(address)); + address += 4; + dst32++; + len--; + } + } +} + + #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \ CHIPCREGOFFS(reg), value) @@ -1352,12 +1397,36 @@ static void brcmf_pcie_wowl_config(struct device *dev, bool enabled) } +static size_t brcmf_pcie_get_ramsize(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; + struct brcmf_pciedev_info *devinfo = buspub->devinfo; + + return devinfo->ci->ramsize - devinfo->ci->srsize; +} + + +static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; + struct brcmf_pciedev_info *devinfo = buspub->devinfo; + + brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len); + brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len); + return 0; +} + + static struct brcmf_bus_ops brcmf_pcie_bus_ops = { .txdata = brcmf_pcie_tx, .stop = brcmf_pcie_down, .txctl = brcmf_pcie_tx_ctlpkt, .rxctl = brcmf_pcie_rx_ctlpkt, .wowl_config = brcmf_pcie_wowl_config, + .get_ramsize = brcmf_pcie_get_ramsize, + .get_memdump = brcmf_pcie_get_memdump, }; @@ -1456,6 +1525,10 @@ static int brcmf_pcie_get_fwnames(struct 
brcmf_pciedev_info *devinfo) fw_name = BRCMF_PCIE_4366_FW_NAME; nvram_name = BRCMF_PCIE_4366_NVRAM_NAME; break; + case BRCM_CC_4371_CHIP_ID: + fw_name = BRCMF_PCIE_4371_FW_NAME; + nvram_name = BRCMF_PCIE_4371_NVRAM_NAME; + break; default: brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip); return -ENODEV; @@ -1995,6 +2068,7 @@ static struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID), { /* end: all zeroes */ } }; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c index 7f574f26cdef..7e74ac3ad815 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c @@ -3539,6 +3539,51 @@ done: return err; } +static size_t brcmf_sdio_bus_get_ramsize(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct brcmf_sdio *bus = sdiodev->bus; + + return bus->ci->ramsize - bus->ci->srsize; +} + +static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data, + size_t mem_size) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct brcmf_sdio *bus = sdiodev->bus; + int err; + int address; + int offset; + int len; + + brcmf_dbg(INFO, "dump at 0x%08x: size=%zu\n", bus->ci->rambase, + mem_size); + + address = bus->ci->rambase; + offset = err = 0; + sdio_claim_host(sdiodev->func[1]); + while (offset < mem_size) { + len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK : + mem_size - offset; + err = brcmf_sdiod_ramrw(sdiodev, false, address, data, len); + if (err) { + brcmf_err("error %d on reading %d membytes at 0x%08x\n", + err, len, address); + goto done; + } + data += len; + offset += len; + address += len; + } + +done: + sdio_release_host(sdiodev->func[1]); + return err; +} + void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus) { if (!bus->dpc_triggered) { @@ -3987,7 +4032,9 @@ static struct brcmf_bus_ops brcmf_sdio_bus_ops = { .txctl = brcmf_sdio_bus_txctl, .rxctl = brcmf_sdio_bus_rxctl, .gettxq = brcmf_sdio_bus_gettxq, - .wowl_config = brcmf_sdio_wowl_config + .wowl_config = brcmf_sdio_wowl_config, + .get_ramsize = brcmf_sdio_bus_get_ramsize, + .get_memdump = brcmf_sdio_bus_get_memdump, }; static void brcmf_sdio_firmware_callback(struct device *dev, diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index daba86d881bc..689e64d004bc 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -144,6 +144,7 @@ struct brcmf_usbdev_info { struct usb_device *usbdev; struct device *dev; + struct mutex dev_init_lock; int ctl_in_pipe, ctl_out_pipe; struct urb *ctl_urb; /* URB for control endpoint */ @@ -1204,6 +1205,8 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret; brcmf_dbg(USB, "Start fw downloading\n"); + + devinfo = bus->bus_priv.usb->devinfo; ret = check_file(fw->data); if (ret < 0) { brcmf_err("invalid firmware\n"); @@ -1211,7 +1214,6 @@ static void brcmf_usb_probe_phase2(struct device *dev, goto error; } - devinfo = bus->bus_priv.usb->devinfo; devinfo->image = fw->data; devinfo->image_len = fw->size; @@ -1224,9 +1226,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, if (ret) goto error; + 
mutex_unlock(&devinfo->dev_init_lock); return; error: brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret); + mutex_unlock(&devinfo->dev_init_lock); device_release_driver(dev); } @@ -1264,6 +1268,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) if (ret) goto fail; /* we are done */ + mutex_unlock(&devinfo->dev_init_lock); return 0; } bus->chip = bus_pub->devid; @@ -1317,6 +1322,12 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) devinfo->usbdev = usb; devinfo->dev = &usb->dev; + /* Take an init lock, to protect for disconnect while still loading. + * Necessary because of the asynchronous firmware load construction + */ + mutex_init(&devinfo->dev_init_lock); + mutex_lock(&devinfo->dev_init_lock); + usb_set_intfdata(intf, devinfo); /* Check that the device supports only one configuration */ @@ -1391,6 +1402,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) return 0; fail: + mutex_unlock(&devinfo->dev_init_lock); kfree(devinfo); usb_set_intfdata(intf, NULL); return ret; @@ -1403,8 +1415,19 @@ brcmf_usb_disconnect(struct usb_interface *intf) brcmf_dbg(USB, "Enter\n"); devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf); - brcmf_usb_disconnect_cb(devinfo); - kfree(devinfo); + + if (devinfo) { + mutex_lock(&devinfo->dev_init_lock); + /* Make sure that devinfo still exists. Firmware probe routines + * may have released the device and cleared the intfdata. + */ + if (!usb_get_intfdata(intf)) + goto done; + + brcmf_usb_disconnect_cb(devinfo); + kfree(devinfo); + } +done: brcmf_dbg(USB, "Exit\n"); } diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h index d823734a4713..aa06ea231db3 100644 --- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h @@ -50,6 +50,7 @@ #define BRCM_CC_43602_CHIP_ID 43602 #define BRCM_CC_4365_CHIP_ID 0x4365 #define BRCM_CC_4366_CHIP_ID 0x4366 +#define BRCM_CC_4371_CHIP_ID 0x4371 /* USB Device IDs */ #define BRCM_USB_43143_DEVICE_ID 0xbd1e @@ -75,6 +76,7 @@ #define BRCM_PCIE_4366_DEVICE_ID 0x43c3 #define BRCM_PCIE_4366_2G_DEVICE_ID 0x43c4 #define BRCM_PCIE_4366_5G_DEVICE_ID 0x43c5 +#define BRCM_PCIE_4371_DEVICE_ID 0x440d /* brcmsmac IDs */ diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 39f3e6f5cbcd..ed0adaf1eec4 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -10470,7 +10470,6 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, vers, date); strlcpy(info->bus_info, pci_name(p->pci_dev), sizeof(info->bus_info)); - info->eedump_len = IPW_EEPROM_IMAGE_SIZE; } static u32 ipw_ethtool_get_link(struct net_device *dev) diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index ab45819c1fbb..e18629a16fb0 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -1020,7 +1020,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc->pn = cpu_to_le64( + aes_sc[i].pn = cpu_to_le64( (u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index d561181f2cff..1a73c7a1da77 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -341,6 +341,6 @@ const 
struct iwl_cfg iwl7265d_n_cfg = { }; MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); -MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); +MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 1d54355ad76a..85ae902df7c0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -274,18 +274,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, break; case WLAN_CIPHER_SUITE_CCMP: if (sta) { - u8 *pn = seq.ccmp.pn; + u64 pn64; aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - ieee80211_get_key_tx_seq(key, &seq); - aes_tx_sc->pn = cpu_to_le64((u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + pn64 = atomic64_read(&key->tx_pn); + aes_tx_sc->pn = cpu_to_le64(pn64); } else { aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; } @@ -298,12 +293,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc->pn = cpu_to_le64((u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + aes_sc[i].pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); } data->use_rsc_tsc = true; break; @@ -1456,15 +1451,15 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: - iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq); iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key); + atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); break; case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); + ieee80211_set_key_tx_seq(key, &seq); break; } - ieee80211_set_key_tx_seq(key, &seq); /* that's it for this key */ return; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index 834641e250fb..d906fa13ba97 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -699,7 +699,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) * abort after reading the nvm in case RF Kill is on, we will complete * the init seq later when RF kill will switch to off */ - if (iwl_mvm_is_radio_killed(mvm)) { + if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "jump over all phy activities due to RF kill\n"); iwl_remove_notification(&mvm->notif_wait, &calib_wait); @@ -732,7 +732,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, MVM_UCODE_CALIB_TIMEOUT); - if (ret && iwl_mvm_is_radio_killed(mvm)) { + if (ret && iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); ret = 1; } diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 1d21e380ca11..1fb684693040 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -2435,6 +2435,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_remove_time_event(mvm, mvmvif, 
&mvmvif->time_event_data); RCU_INIT_POINTER(mvm->csa_vif, NULL); + mvmvif->csa_countdown = false; } if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 4485bdb56b34..c6327cd1d071 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -870,6 +870,11 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); } +static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) +{ + return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); +} + /* Must be called with rcu_read_lock() held and it can only be * released when mvmsta is not needed anymore. */ diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index f0728b784edb..13c97f665ba8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -602,6 +602,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, ieee80211_unregister_hw(mvm->hw); iwl_mvm_leds_exit(mvm); out_free: + flush_delayed_work(&mvm->fw_dump_wk); iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name) diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index b0825c402c73..644b58bc5226 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -414,6 +414,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index 317d99189556..279167ddd293 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -33,12 +33,12 @@ config MWIFIEX_PCIE mwifiex_pcie. config MWIFIEX_USB - tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997" + tristate "Marvell WiFi-Ex Driver for USB8766/8797/8997" depends on MWIFIEX && USB select FW_LOADER ---help--- This adds support for wireless adapters based on Marvell - 8797/8897/8997 chipset with USB interface. + 8797/8997 chipset with USB interface. If you choose to build it as a module, it will be called mwifiex_usb. diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 30cbafbd17c6..b7ac45f324d7 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2374,7 +2374,7 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) * CFG802.11 operation handler for scan request. * * This function issues a scan request to the firmware based upon - * the user specified scan configuration. On successfull completion, + * the user specified scan configuration. On successful completion, * it also informs the results. 
*/ static int diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c index 5583856fc5c4..9824d8dd2b44 100644 --- a/drivers/net/wireless/mwifiex/debugfs.c +++ b/drivers/net/wireless/mwifiex/debugfs.c @@ -856,6 +856,56 @@ mwifiex_hscfg_read(struct file *file, char __user *ubuf, return ret; } +static ssize_t +mwifiex_timeshare_coex_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct mwifiex_private *priv = file->private_data; + char buf[3]; + bool timeshare_coex; + int ret; + unsigned int len; + + if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15) + return -EOPNOTSUPP; + + ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX, + HostCmd_ACT_GEN_GET, 0, &timeshare_coex, true); + if (ret) + return ret; + + len = sprintf(buf, "%d\n", timeshare_coex); + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static ssize_t +mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + bool timeshare_coex; + struct mwifiex_private *priv = file->private_data; + char kbuf[16]; + int ret; + + if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15) + return -EOPNOTSUPP; + + memset(kbuf, 0, sizeof(kbuf)); + + if (copy_from_user(&kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count))) + return -EFAULT; + + if (strtobool(kbuf, &timeshare_coex)) + return -EINVAL; + + ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX, + HostCmd_ACT_GEN_SET, 0, &timeshare_coex, true); + if (ret) + return ret; + else + return count; +} + #define MWIFIEX_DFS_ADD_FILE(name) do { \ if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir, \ priv, &mwifiex_dfs_##name##_fops)) \ @@ -892,6 +942,7 @@ MWIFIEX_DFS_FILE_OPS(memrw); MWIFIEX_DFS_FILE_OPS(hscfg); MWIFIEX_DFS_FILE_OPS(histogram); MWIFIEX_DFS_FILE_OPS(debug_mask); +MWIFIEX_DFS_FILE_OPS(timeshare_coex); /* * This function creates the debug FS directory structure and the files. 
@@ -918,6 +969,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) MWIFIEX_DFS_ADD_FILE(hscfg); MWIFIEX_DFS_ADD_FILE(histogram); MWIFIEX_DFS_ADD_FILE(debug_mask); + MWIFIEX_DFS_ADD_FILE(timeshare_coex); } /* diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 0e6f029458d5..1e1e81a0a8d4 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -101,6 +101,9 @@ enum KEY_TYPE_ID { #define FIRMWARE_READY_SDIO 0xfedc #define FIRMWARE_READY_PCIE 0xfedcba00 +#define MWIFIEX_COEX_MODE_TIMESHARE 0x01 +#define MWIFIEX_COEX_MODE_SPATIAL 0x82 + enum mwifiex_usb_ep { MWIFIEX_USB_EP_CMD_EVENT = 1, MWIFIEX_USB_EP_DATA = 2, @@ -163,6 +166,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_CHANRPT_11H_BASIC (PROPRIETARY_TLV_BASE_ID + 91) #define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93) #define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94) +#define TLV_TYPE_ROBUST_COEX (PROPRIETARY_TLV_BASE_ID + 96) #define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104) #define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105) #define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113) @@ -354,6 +358,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_AMSDU_AGGR_CTRL 0x00df #define HostCmd_CMD_TXPWR_CFG 0x00d1 #define HostCmd_CMD_TX_RATE_CFG 0x00d6 +#define HostCmd_CMD_ROBUST_COEX 0x00e0 #define HostCmd_CMD_802_11_PS_MODE_ENH 0x00e4 #define HostCmd_CMD_802_11_HS_CFG_ENH 0x00e5 #define HostCmd_CMD_P2P_MODE_CFG 0x00eb @@ -1877,6 +1882,11 @@ struct mwifiex_ie_types_btcoex_aggr_win_size { u8 reserved; } __packed; +struct mwifiex_ie_types_robust_coex { + struct mwifiex_ie_types_header header; + __le32 mode; +} __packed; + struct host_cmd_ds_version_ext { u8 version_str_sel; char version_str[128]; @@ -2078,6 +2088,11 @@ struct host_cmd_ds_multi_chan_policy { __le16 policy; } __packed; +struct host_cmd_ds_robust_coex { + __le16 action; + __le16 reserved; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -2147,6 +2162,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_chan_rpt_req chan_rpt_req; struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg; struct host_cmd_ds_multi_chan_policy mc_policy; + struct host_cmd_ds_robust_coex coex; } params; } __packed; diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 6e3faa74389c..969ca1e1f3e9 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -1199,6 +1199,7 @@ static const struct net_device_ops mwifiex_netdev_ops = { .ndo_stop = mwifiex_close, .ndo_start_xmit = mwifiex_hard_start_xmit, .ndo_set_mac_address = mwifiex_set_mac_address, + .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = mwifiex_tx_timeout, .ndo_get_stats = mwifiex_get_stats, .ndo_set_rx_mode = mwifiex_set_multicast_list, diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 504b321301ec..e486867a4c67 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -1531,6 +1531,33 @@ mwifiex_cmd_set_mc_policy(struct mwifiex_private *priv, return 0; } +static int mwifiex_cmd_robust_coex(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + u16 cmd_action, bool *is_timeshare) +{ + struct host_cmd_ds_robust_coex *coex = &cmd->params.coex; + struct mwifiex_ie_types_robust_coex *coex_tlv; + + cmd->command = cpu_to_le16(HostCmd_CMD_ROBUST_COEX); + cmd->size = cpu_to_le16(sizeof(*coex) + sizeof(*coex_tlv) 
+ S_DS_GEN); + + coex->action = cpu_to_le16(cmd_action); + coex_tlv = (struct mwifiex_ie_types_robust_coex *) + ((u8 *)coex + sizeof(*coex)); + coex_tlv->header.type = cpu_to_le16(TLV_TYPE_ROBUST_COEX); + coex_tlv->header.len = cpu_to_le16(sizeof(coex_tlv->mode)); + + if (coex->action == HostCmd_ACT_GEN_GET) + return 0; + + if (*is_timeshare) + coex_tlv->mode = cpu_to_le32(MWIFIEX_COEX_MODE_TIMESHARE); + else + coex_tlv->mode = cpu_to_le32(MWIFIEX_COEX_MODE_SPATIAL); + + return 0; +} + static int mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, @@ -2040,6 +2067,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action, data_buf); break; + case HostCmd_CMD_ROBUST_COEX: + ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action, + data_buf); + break; default: mwifiex_dbg(priv->adapter, ERROR, "PREP_CMD: unknown cmd- %#x\n", cmd_no); diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index d0961635c7b3..9ac7aa2431b4 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -1007,6 +1007,28 @@ static int mwifiex_ret_sdio_rx_aggr_cfg(struct mwifiex_private *priv, return 0; } +static int mwifiex_ret_robust_coex(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp, + bool *is_timeshare) +{ + struct host_cmd_ds_robust_coex *coex = &resp->params.coex; + struct mwifiex_ie_types_robust_coex *coex_tlv; + u16 action = le16_to_cpu(coex->action); + u32 mode; + + coex_tlv = (struct mwifiex_ie_types_robust_coex + *)((u8 *)coex + sizeof(struct host_cmd_ds_robust_coex)); + if (action == HostCmd_ACT_GEN_GET) { + mode = le32_to_cpu(coex_tlv->mode); + if (mode == MWIFIEX_COEX_MODE_TIMESHARE) + *is_timeshare = true; + else + *is_timeshare = false; + } + + return 0; +} + /* * This function handles the command responses. 
* @@ -1213,6 +1235,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_TDLS_CONFIG: break; + case HostCmd_CMD_ROBUST_COEX: + ret = mwifiex_ret_robust_coex(priv, resp, data_buf); + break; default: mwifiex_dbg(adapter, ERROR, "CMD_RESP: unknown cmd response %#x\n", diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index 4d5a6e3b6361..759a6ada5b0f 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -846,22 +846,6 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv, { enum state_11d_t state_11d; - if (mwifiex_del_mgmt_ies(priv)) - mwifiex_dbg(priv->adapter, ERROR, - "Failed to delete mgmt IEs!\n"); - - if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP, - HostCmd_ACT_GEN_SET, 0, NULL, true)) { - mwifiex_dbg(priv->adapter, ERROR, "Failed to stop the BSS\n"); - return -1; - } - - if (mwifiex_send_cmd(priv, HOST_CMD_APCMD_SYS_RESET, - HostCmd_ACT_GEN_SET, 0, NULL, true)) { - mwifiex_dbg(priv->adapter, ERROR, "Failed to reset BSS\n"); - return -1; - } - if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG, HostCmd_ACT_GEN_SET, UAP_BSS_PARAMS_I, bss_cfg, false)) { diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 9f5356ef0531..e43aff932360 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -42,11 +42,6 @@ static struct usb_device_id mwifiex_usb_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8801_PID_2, USB_CLASS_VENDOR_SPEC, USB_SUBCLASS_VENDOR_SPEC, 0xff)}, - /* 8897 */ - {USB_DEVICE(USB8XXX_VID, USB8897_PID_1)}, - {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2, - USB_CLASS_VENDOR_SPEC, - USB_SUBCLASS_VENDOR_SPEC, 0xff)}, /* 8997 */ {USB_DEVICE(USB8XXX_VID, USB8997_PID_1)}, {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2, @@ -403,14 +398,12 @@ static int mwifiex_usb_probe(struct usb_interface *intf, case USB8766_PID_1: case USB8797_PID_1: case USB8801_PID_1: - case USB8897_PID_1: case USB8997_PID_1: card->usb_boot_state = USB8XXX_FW_DNLD; break; case USB8766_PID_2: case USB8797_PID_2: case USB8801_PID_2: - case USB8897_PID_2: case USB8997_PID_2: card->usb_boot_state = USB8XXX_FW_READY; break; @@ -964,12 +957,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME); adapter->ext_scan = true; break; - case USB8897_PID_1: - case USB8897_PID_2: - adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; - strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME); - adapter->ext_scan = true; - break; case USB8766_PID_1: case USB8766_PID_2: adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; @@ -1277,5 +1264,4 @@ MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME); MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME); MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME); -MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME); MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h index bab10ee41923..b4e9246bbcdc 100644 --- a/drivers/net/wireless/mwifiex/usb.h +++ b/drivers/net/wireless/mwifiex/usb.h @@ -28,11 +28,9 @@ #define USB8766_PID_2 0x2042 #define USB8797_PID_1 0x2043 #define USB8797_PID_2 0x2044 -#define USB8897_PID_1 0x2045 -#define USB8897_PID_2 0x2046 #define USB8801_PID_1 0x2049 #define USB8801_PID_2 0x204a -#define USB8997_PID_1 0x204d +#define USB8997_PID_1 0x2052 #define USB8997_PID_2 0x204e @@ -48,7 +46,6 @@ #define 
USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin" #define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin" #define USB8801_DEFAULT_FW_NAME "mrvl/usb8801_uapsta.bin" -#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin" #define USB8997_DEFAULT_FW_NAME "mrvl/usb8997_uapsta.bin" #define FW_DNLD_TX_BUF_SIZE 620 diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 57c13ec3d3de..acccd6734e3b 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -684,7 +684,7 @@ void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv, if (!memcmp(ra_list->ra, mac, ETH_ALEN)) continue; - if (ra_list && ra_list->tx_paused != tx_pause) { + if (ra_list->tx_paused != tx_pause) { pkt_cnt += ra_list->total_pkt_count; ra_list->tx_paused = tx_pause; if (tx_pause) diff --git a/drivers/net/wireless/realtek/Makefile b/drivers/net/wireless/realtek/Makefile new file mode 100644 index 000000000000..9c78deb5eea9 --- /dev/null +++ b/drivers/net/wireless/realtek/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the Linux Wireless network device drivers for Realtek units +# + +obj-$(CONFIG_RTL8180) += rtl818x/ +obj-$(CONFIG_RTL8187) += rtl818x/ +obj-$(CONFIG_RTLWIFI) += rtlwifi/ +obj-$(CONFIG_RTL8XXXU) += rtl8xxxu/ + diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/realtek/rtl818x/Kconfig index 1ce1d55f0010..1ce1d55f0010 100644 --- a/drivers/net/wireless/rtl818x/Kconfig +++ b/drivers/net/wireless/realtek/rtl818x/Kconfig diff --git a/drivers/net/wireless/rtl818x/Makefile b/drivers/net/wireless/realtek/rtl818x/Makefile index 997569076923..997569076923 100644 --- a/drivers/net/wireless/rtl818x/Makefile +++ b/drivers/net/wireless/realtek/rtl818x/Makefile diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile index 21005bd8b43c..2966681efaef 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/Makefile +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile @@ -2,4 +2,4 @@ rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o obj-$(CONFIG_RTL8180) += rtl818x_pci.o -ccflags-y += -Idrivers/net/wireless/rtl818x +ccflags-y += -Idrivers/net/wireless/realtek/rtl818x diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index a43a16fde59d..a43a16fde59d 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c index b1bfee738937..b1bfee738937 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.h index 4d80a2785123..4d80a2785123 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.h diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c index eebf23976524..eebf23976524 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/max2820.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.h index 8e982b72b690..8e982b72b690 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/max2820.h +++ 
b/drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.h diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h index e8243a44d6b6..e8243a44d6b6 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c index 9bda5bc78eda..9bda5bc78eda 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h index 310013a2d726..310013a2d726 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c index fde89866fa8d..fde89866fa8d 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.h index 229400264088..229400264088 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.h diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c index 959b049827de..959b049827de 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h index fb0093f35148..fb0093f35148 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h diff --git a/drivers/net/wireless/rtl818x/rtl8187/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile index 7b6299268ecf..ff074912a095 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/Makefile +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile @@ -2,4 +2,4 @@ rtl8187-objs := dev.o rtl8225.o leds.o rfkill.o obj-$(CONFIG_RTL8187) += rtl8187.o -ccflags-y += -Idrivers/net/wireless/rtl818x +ccflags-y += -Idrivers/net/wireless/realtek/rtl818x diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index b7f72f9c7988..b7f72f9c7988 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c index c2d5b495c179..c2d5b495c179 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/leds.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.h index d743c96d4a20..d743c96d4a20 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/leds.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.h diff --git a/drivers/net/wireless/rtl818x/rtl8187/rfkill.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c index 34116719974a..34116719974a 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rfkill.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c diff --git a/drivers/net/wireless/rtl818x/rtl8187/rfkill.h 
b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h index e12575e96d11..e12575e96d11 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rfkill.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h index a6ad79f61bf9..a6ad79f61bf9 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c index 5ecf18ed67b8..5ecf18ed67b8 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.h index 141afb09a5b4..141afb09a5b4 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.h diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/realtek/rtl818x/rtl818x.h index 7abef95d278b..7abef95d278b 100644 --- a/drivers/net/wireless/rtl818x/rtl818x.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl818x.h diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig new file mode 100644 index 000000000000..dd4d626aecbc --- /dev/null +++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig @@ -0,0 +1,34 @@ +# +# RTL8XXXU Wireless LAN device configuration +# +config RTL8XXXU + tristate "RTL8723AU/RTL8188[CR]U/RTL819[12]CU (mac80211) support" + depends on MAC80211 && USB + ---help--- + This is an alternative driver for various Realtek RTL8XXX + parts written to utilize the Linux mac80211 stack. + The driver is known to work with a number of RTL8723AU, + RL8188CU, RTL8188RU, RTL8191CU, and RTL8192CU devices + + This driver is under development and has a limited feature + set. In particular it does not yet support 40MHz channels + and power management. However it should have a smaller + memory footprint than the vendor drivers and benetifs + from the in kernel mac80211 stack. + + It can coexist with drivers from drivers/staging/rtl8723au, + drivers/staging/rtl8192u, and drivers/net/wireless/rtlwifi, + but you will need to control which module you wish to load. + + To compile this driver as a module, choose M here: the module will + be called r8xxxu. If unsure, say N. + +config RTL8XXXU_UNTESTED + bool "Include support for untested Realtek 8xxx USB devices (EXPERIMENTAL)" + depends on RTL8XXXU + ---help--- + This option enables detection of Realtek 8723/8188/8191/8192 WiFi + USB devices which have not been tested directly by the driver + author or reported to be working by third parties. + + Please report your results! 
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile new file mode 100644 index 000000000000..5dea3bb93069 --- /dev/null +++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c new file mode 100644 index 000000000000..6aed923a709a --- /dev/null +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -0,0 +1,5993 @@ +/* + * RTL8XXXU mac80211 USB driver + * + * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com> + * + * Portions, notably calibration code: + * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. + * + * This driver was written as a replacement for the vendor provided + * rtl8723au driver. As the Realtek 8xxx chips are very similar in + * their programming interface, I have started adding support for + * additional 8xxx chips like the 8192cu, 8188cus, etc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/usb.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/wireless.h> +#include <linux/firmware.h> +#include <linux/moduleparam.h> +#include <net/mac80211.h> +#include "rtl8xxxu.h" +#include "rtl8xxxu_regs.h" + +#define DRIVER_NAME "rtl8xxxu" + +static int rtl8xxxu_debug; +static bool rtl8xxxu_ht40_2g; + +MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>"); +MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin"); + +module_param_named(debug, rtl8xxxu_debug, int, 0600); +MODULE_PARM_DESC(debug, "Set debug mask"); +module_param_named(ht40_2g, rtl8xxxu_ht40_2g, bool, 0600); +MODULE_PARM_DESC(ht40_2g, "Enable HT40 support on the 2.4GHz band"); + +#define USB_VENDOR_ID_REALTEK 0x0bda +/* Minimum IEEE80211_MAX_FRAME_LEN */ +#define RTL_RX_BUFFER_SIZE IEEE80211_MAX_FRAME_LEN +#define RTL8XXXU_RX_URBS 32 +#define RTL8XXXU_RX_URB_PENDING_WATER 8 +#define RTL8XXXU_TX_URBS 64 +#define RTL8XXXU_TX_URB_LOW_WATER 25 +#define RTL8XXXU_TX_URB_HIGH_WATER 32 + +static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv, + struct rtl8xxxu_rx_urb *rx_urb); + +static struct ieee80211_rate rtl8xxxu_rates[] = { + { .bitrate = 10, .hw_value = DESC_RATE_1M, .flags = 0 }, + { .bitrate = 20, .hw_value = DESC_RATE_2M, .flags = 0 }, + { .bitrate = 55, .hw_value = DESC_RATE_5_5M, .flags = 0 }, + { .bitrate = 110, .hw_value = DESC_RATE_11M, .flags = 0 }, + { .bitrate = 60, .hw_value = DESC_RATE_6M, .flags = 0 }, + { .bitrate = 90, .hw_value = DESC_RATE_9M, .flags = 
0 }, + { .bitrate = 120, .hw_value = DESC_RATE_12M, .flags = 0 }, + { .bitrate = 180, .hw_value = DESC_RATE_18M, .flags = 0 }, + { .bitrate = 240, .hw_value = DESC_RATE_24M, .flags = 0 }, + { .bitrate = 360, .hw_value = DESC_RATE_36M, .flags = 0 }, + { .bitrate = 480, .hw_value = DESC_RATE_48M, .flags = 0 }, + { .bitrate = 540, .hw_value = DESC_RATE_54M, .flags = 0 }, +}; + +static struct ieee80211_channel rtl8xxxu_channels_2g[] = { + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, + .hw_value = 1, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, + .hw_value = 2, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, + .hw_value = 3, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, + .hw_value = 4, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, + .hw_value = 5, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, + .hw_value = 6, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, + .hw_value = 7, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, + .hw_value = 8, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, + .hw_value = 9, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, + .hw_value = 10, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, + .hw_value = 11, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, + .hw_value = 12, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, + .hw_value = 13, .max_power = 30 }, + { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, + .hw_value = 14, .max_power = 30 } +}; + +static struct ieee80211_supported_band rtl8xxxu_supported_band = { + .channels = rtl8xxxu_channels_2g, + .n_channels = ARRAY_SIZE(rtl8xxxu_channels_2g), + .bitrates = rtl8xxxu_rates, + .n_bitrates = ARRAY_SIZE(rtl8xxxu_rates), +}; + +static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = { + {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00}, + {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, + {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00}, + {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05}, + {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01}, + {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f}, + {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72}, + {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08}, + {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, + {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, + {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, + {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, + {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, + {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, + {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16}, + {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00}, + {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02}, + {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, + {0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, + {0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, + {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, + {0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff}, +}; + +static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = { + {0x800, 0x80040000}, {0x804, 0x00000003}, + {0x808, 0x0000fc00}, {0x80c, 0x0000000a}, + {0x810, 0x10001331}, {0x814, 
0x020c3d10}, + {0x818, 0x02200385}, {0x81c, 0x00000000}, + {0x820, 0x01000100}, {0x824, 0x00390004}, + {0x828, 0x00000000}, {0x82c, 0x00000000}, + {0x830, 0x00000000}, {0x834, 0x00000000}, + {0x838, 0x00000000}, {0x83c, 0x00000000}, + {0x840, 0x00010000}, {0x844, 0x00000000}, + {0x848, 0x00000000}, {0x84c, 0x00000000}, + {0x850, 0x00000000}, {0x854, 0x00000000}, + {0x858, 0x569a569a}, {0x85c, 0x001b25a4}, + {0x860, 0x66f60110}, {0x864, 0x061f0130}, + {0x868, 0x00000000}, {0x86c, 0x32323200}, + {0x870, 0x07000760}, {0x874, 0x22004000}, + {0x878, 0x00000808}, {0x87c, 0x00000000}, + {0x880, 0xc0083070}, {0x884, 0x000004d5}, + {0x888, 0x00000000}, {0x88c, 0xccc000c0}, + {0x890, 0x00000800}, {0x894, 0xfffffffe}, + {0x898, 0x40302010}, {0x89c, 0x00706050}, + {0x900, 0x00000000}, {0x904, 0x00000023}, + {0x908, 0x00000000}, {0x90c, 0x81121111}, + {0xa00, 0x00d047c8}, {0xa04, 0x80ff000c}, + {0xa08, 0x8c838300}, {0xa0c, 0x2e68120f}, + {0xa10, 0x9500bb78}, {0xa14, 0x11144028}, + {0xa18, 0x00881117}, {0xa1c, 0x89140f00}, + {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317}, + {0xa28, 0x00000204}, {0xa2c, 0x00d30000}, + {0xa70, 0x101fbf00}, {0xa74, 0x00000007}, + {0xa78, 0x00000900}, + {0xc00, 0x48071d40}, {0xc04, 0x03a05611}, + {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c}, + {0xc10, 0x08800000}, {0xc14, 0x40000100}, + {0xc18, 0x08800000}, {0xc1c, 0x40000100}, + {0xc20, 0x00000000}, {0xc24, 0x00000000}, + {0xc28, 0x00000000}, {0xc2c, 0x00000000}, + {0xc30, 0x69e9ac44}, {0xc34, 0x469652af}, + {0xc38, 0x49795994}, {0xc3c, 0x0a97971c}, + {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7}, + {0xc48, 0xec020107}, {0xc4c, 0x007f037f}, + {0xc50, 0x69543420}, {0xc54, 0x43bc0094}, + {0xc58, 0x69543420}, {0xc5c, 0x433c0094}, + {0xc60, 0x00000000}, {0xc64, 0x7112848b}, + {0xc68, 0x47c00bff}, {0xc6c, 0x00000036}, + {0xc70, 0x2c7f000d}, {0xc74, 0x018610db}, + {0xc78, 0x0000001f}, {0xc7c, 0x00b91612}, + {0xc80, 0x40000100}, {0xc84, 0x20f60000}, + {0xc88, 0x40000100}, {0xc8c, 0x20200000}, + {0xc90, 0x00121820}, {0xc94, 0x00000000}, + {0xc98, 0x00121820}, {0xc9c, 0x00007f7f}, + {0xca0, 0x00000000}, {0xca4, 0x00000080}, + {0xca8, 0x00000000}, {0xcac, 0x00000000}, + {0xcb0, 0x00000000}, {0xcb4, 0x00000000}, + {0xcb8, 0x00000000}, {0xcbc, 0x28000000}, + {0xcc0, 0x00000000}, {0xcc4, 0x00000000}, + {0xcc8, 0x00000000}, {0xccc, 0x00000000}, + {0xcd0, 0x00000000}, {0xcd4, 0x00000000}, + {0xcd8, 0x64b22427}, {0xcdc, 0x00766932}, + {0xce0, 0x00222222}, {0xce4, 0x00000000}, + {0xce8, 0x37644302}, {0xcec, 0x2f97d40c}, + {0xd00, 0x00080740}, {0xd04, 0x00020401}, + {0xd08, 0x0000907f}, {0xd0c, 0x20010201}, + {0xd10, 0xa0633333}, {0xd14, 0x3333bc43}, + {0xd18, 0x7a8f5b6b}, {0xd2c, 0xcc979975}, + {0xd30, 0x00000000}, {0xd34, 0x80608000}, + {0xd38, 0x00000000}, {0xd3c, 0x00027293}, + {0xd40, 0x00000000}, {0xd44, 0x00000000}, + {0xd48, 0x00000000}, {0xd4c, 0x00000000}, + {0xd50, 0x6437140a}, {0xd54, 0x00000000}, + {0xd58, 0x00000000}, {0xd5c, 0x30032064}, + {0xd60, 0x4653de68}, {0xd64, 0x04518a3c}, + {0xd68, 0x00002101}, {0xd6c, 0x2a201c16}, + {0xd70, 0x1812362e}, {0xd74, 0x322c2220}, + {0xd78, 0x000e3c24}, {0xe00, 0x2a2a2a2a}, + {0xe04, 0x2a2a2a2a}, {0xe08, 0x03902a2a}, + {0xe10, 0x2a2a2a2a}, {0xe14, 0x2a2a2a2a}, + {0xe18, 0x2a2a2a2a}, {0xe1c, 0x2a2a2a2a}, + {0xe28, 0x00000000}, {0xe30, 0x1000dc1f}, + {0xe34, 0x10008c1f}, {0xe38, 0x02140102}, + {0xe3c, 0x681604c2}, {0xe40, 0x01007c00}, + {0xe44, 0x01004800}, {0xe48, 0xfb000000}, + {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f}, + {0xe54, 0x10008c1f}, {0xe58, 0x02140102}, + {0xe5c, 0x28160d05}, {0xe60, 
0x00000008}, + {0xe68, 0x001b25a4}, {0xe6c, 0x631b25a0}, + {0xe70, 0x631b25a0}, {0xe74, 0x081b25a0}, + {0xe78, 0x081b25a0}, {0xe7c, 0x081b25a0}, + {0xe80, 0x081b25a0}, {0xe84, 0x631b25a0}, + {0xe88, 0x081b25a0}, {0xe8c, 0x631b25a0}, + {0xed0, 0x631b25a0}, {0xed4, 0x631b25a0}, + {0xed8, 0x631b25a0}, {0xedc, 0x001b25a0}, + {0xee0, 0x001b25a0}, {0xeec, 0x6b1b25a0}, + {0xf14, 0x00000003}, {0xf4c, 0x00000000}, + {0xf00, 0x00000300}, + {0xffff, 0xffffffff}, +}; + +static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = { + {0x024, 0x0011800f}, {0x028, 0x00ffdb83}, + {0x800, 0x80040002}, {0x804, 0x00000003}, + {0x808, 0x0000fc00}, {0x80c, 0x0000000a}, + {0x810, 0x10000330}, {0x814, 0x020c3d10}, + {0x818, 0x02200385}, {0x81c, 0x00000000}, + {0x820, 0x01000100}, {0x824, 0x00390004}, + {0x828, 0x01000100}, {0x82c, 0x00390004}, + {0x830, 0x27272727}, {0x834, 0x27272727}, + {0x838, 0x27272727}, {0x83c, 0x27272727}, + {0x840, 0x00010000}, {0x844, 0x00010000}, + {0x848, 0x27272727}, {0x84c, 0x27272727}, + {0x850, 0x00000000}, {0x854, 0x00000000}, + {0x858, 0x569a569a}, {0x85c, 0x0c1b25a4}, + {0x860, 0x66e60230}, {0x864, 0x061f0130}, + {0x868, 0x27272727}, {0x86c, 0x2b2b2b27}, + {0x870, 0x07000700}, {0x874, 0x22184000}, + {0x878, 0x08080808}, {0x87c, 0x00000000}, + {0x880, 0xc0083070}, {0x884, 0x000004d5}, + {0x888, 0x00000000}, {0x88c, 0xcc0000c0}, + {0x890, 0x00000800}, {0x894, 0xfffffffe}, + {0x898, 0x40302010}, {0x89c, 0x00706050}, + {0x900, 0x00000000}, {0x904, 0x00000023}, + {0x908, 0x00000000}, {0x90c, 0x81121313}, + {0xa00, 0x00d047c8}, {0xa04, 0x80ff000c}, + {0xa08, 0x8c838300}, {0xa0c, 0x2e68120f}, + {0xa10, 0x9500bb78}, {0xa14, 0x11144028}, + {0xa18, 0x00881117}, {0xa1c, 0x89140f00}, + {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317}, + {0xa28, 0x00000204}, {0xa2c, 0x00d30000}, + {0xa70, 0x101fbf00}, {0xa74, 0x00000007}, + {0xc00, 0x48071d40}, {0xc04, 0x03a05633}, + {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c}, + {0xc10, 0x08800000}, {0xc14, 0x40000100}, + {0xc18, 0x08800000}, {0xc1c, 0x40000100}, + {0xc20, 0x00000000}, {0xc24, 0x00000000}, + {0xc28, 0x00000000}, {0xc2c, 0x00000000}, + {0xc30, 0x69e9ac44}, {0xc34, 0x469652cf}, + {0xc38, 0x49795994}, {0xc3c, 0x0a97971c}, + {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7}, + {0xc48, 0xec020107}, {0xc4c, 0x007f037f}, + {0xc50, 0x69543420}, {0xc54, 0x43bc0094}, + {0xc58, 0x69543420}, {0xc5c, 0x433c0094}, + {0xc60, 0x00000000}, {0xc64, 0x5116848b}, + {0xc68, 0x47c00bff}, {0xc6c, 0x00000036}, + {0xc70, 0x2c7f000d}, {0xc74, 0x2186115b}, + {0xc78, 0x0000001f}, {0xc7c, 0x00b99612}, + {0xc80, 0x40000100}, {0xc84, 0x20f60000}, + {0xc88, 0x40000100}, {0xc8c, 0xa0e40000}, + {0xc90, 0x00121820}, {0xc94, 0x00000000}, + {0xc98, 0x00121820}, {0xc9c, 0x00007f7f}, + {0xca0, 0x00000000}, {0xca4, 0x00000080}, + {0xca8, 0x00000000}, {0xcac, 0x00000000}, + {0xcb0, 0x00000000}, {0xcb4, 0x00000000}, + {0xcb8, 0x00000000}, {0xcbc, 0x28000000}, + {0xcc0, 0x00000000}, {0xcc4, 0x00000000}, + {0xcc8, 0x00000000}, {0xccc, 0x00000000}, + {0xcd0, 0x00000000}, {0xcd4, 0x00000000}, + {0xcd8, 0x64b22427}, {0xcdc, 0x00766932}, + {0xce0, 0x00222222}, {0xce4, 0x00000000}, + {0xce8, 0x37644302}, {0xcec, 0x2f97d40c}, + {0xd00, 0x00080740}, {0xd04, 0x00020403}, + {0xd08, 0x0000907f}, {0xd0c, 0x20010201}, + {0xd10, 0xa0633333}, {0xd14, 0x3333bc43}, + {0xd18, 0x7a8f5b6b}, {0xd2c, 0xcc979975}, + {0xd30, 0x00000000}, {0xd34, 0x80608000}, + {0xd38, 0x00000000}, {0xd3c, 0x00027293}, + {0xd40, 0x00000000}, {0xd44, 0x00000000}, + {0xd48, 0x00000000}, {0xd4c, 0x00000000}, + {0xd50, 0x6437140a}, {0xd54, 
0x00000000}, + {0xd58, 0x00000000}, {0xd5c, 0x30032064}, + {0xd60, 0x4653de68}, {0xd64, 0x04518a3c}, + {0xd68, 0x00002101}, {0xd6c, 0x2a201c16}, + {0xd70, 0x1812362e}, {0xd74, 0x322c2220}, + {0xd78, 0x000e3c24}, {0xe00, 0x2a2a2a2a}, + {0xe04, 0x2a2a2a2a}, {0xe08, 0x03902a2a}, + {0xe10, 0x2a2a2a2a}, {0xe14, 0x2a2a2a2a}, + {0xe18, 0x2a2a2a2a}, {0xe1c, 0x2a2a2a2a}, + {0xe28, 0x00000000}, {0xe30, 0x1000dc1f}, + {0xe34, 0x10008c1f}, {0xe38, 0x02140102}, + {0xe3c, 0x681604c2}, {0xe40, 0x01007c00}, + {0xe44, 0x01004800}, {0xe48, 0xfb000000}, + {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f}, + {0xe54, 0x10008c1f}, {0xe58, 0x02140102}, + {0xe5c, 0x28160d05}, {0xe60, 0x00000010}, + {0xe68, 0x001b25a4}, {0xe6c, 0x63db25a4}, + {0xe70, 0x63db25a4}, {0xe74, 0x0c1b25a4}, + {0xe78, 0x0c1b25a4}, {0xe7c, 0x0c1b25a4}, + {0xe80, 0x0c1b25a4}, {0xe84, 0x63db25a4}, + {0xe88, 0x0c1b25a4}, {0xe8c, 0x63db25a4}, + {0xed0, 0x63db25a4}, {0xed4, 0x63db25a4}, + {0xed8, 0x63db25a4}, {0xedc, 0x001b25a4}, + {0xee0, 0x001b25a4}, {0xeec, 0x6fdb25a4}, + {0xf14, 0x00000003}, {0xf4c, 0x00000000}, + {0xf00, 0x00000300}, + {0xffff, 0xffffffff}, +}; + +static struct rtl8xxxu_reg32val rtl8188ru_phy_1t_highpa_table[] = { + {0x024, 0x0011800f}, {0x028, 0x00ffdb83}, + {0x040, 0x000c0004}, {0x800, 0x80040000}, + {0x804, 0x00000001}, {0x808, 0x0000fc00}, + {0x80c, 0x0000000a}, {0x810, 0x10005388}, + {0x814, 0x020c3d10}, {0x818, 0x02200385}, + {0x81c, 0x00000000}, {0x820, 0x01000100}, + {0x824, 0x00390204}, {0x828, 0x00000000}, + {0x82c, 0x00000000}, {0x830, 0x00000000}, + {0x834, 0x00000000}, {0x838, 0x00000000}, + {0x83c, 0x00000000}, {0x840, 0x00010000}, + {0x844, 0x00000000}, {0x848, 0x00000000}, + {0x84c, 0x00000000}, {0x850, 0x00000000}, + {0x854, 0x00000000}, {0x858, 0x569a569a}, + {0x85c, 0x001b25a4}, {0x860, 0x66e60230}, + {0x864, 0x061f0130}, {0x868, 0x00000000}, + {0x86c, 0x20202000}, {0x870, 0x03000300}, + {0x874, 0x22004000}, {0x878, 0x00000808}, + {0x87c, 0x00ffc3f1}, {0x880, 0xc0083070}, + {0x884, 0x000004d5}, {0x888, 0x00000000}, + {0x88c, 0xccc000c0}, {0x890, 0x00000800}, + {0x894, 0xfffffffe}, {0x898, 0x40302010}, + {0x89c, 0x00706050}, {0x900, 0x00000000}, + {0x904, 0x00000023}, {0x908, 0x00000000}, + {0x90c, 0x81121111}, {0xa00, 0x00d047c8}, + {0xa04, 0x80ff000c}, {0xa08, 0x8c838300}, + {0xa0c, 0x2e68120f}, {0xa10, 0x9500bb78}, + {0xa14, 0x11144028}, {0xa18, 0x00881117}, + {0xa1c, 0x89140f00}, {0xa20, 0x15160000}, + {0xa24, 0x070b0f12}, {0xa28, 0x00000104}, + {0xa2c, 0x00d30000}, {0xa70, 0x101fbf00}, + {0xa74, 0x00000007}, {0xc00, 0x48071d40}, + {0xc04, 0x03a05611}, {0xc08, 0x000000e4}, + {0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000}, + {0xc14, 0x40000100}, {0xc18, 0x08800000}, + {0xc1c, 0x40000100}, {0xc20, 0x00000000}, + {0xc24, 0x00000000}, {0xc28, 0x00000000}, + {0xc2c, 0x00000000}, {0xc30, 0x69e9ac44}, + {0xc34, 0x469652cf}, {0xc38, 0x49795994}, + {0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f}, + {0xc44, 0x000100b7}, {0xc48, 0xec020107}, + {0xc4c, 0x007f037f}, {0xc50, 0x6954342e}, + {0xc54, 0x43bc0094}, {0xc58, 0x6954342f}, + {0xc5c, 0x433c0094}, {0xc60, 0x00000000}, + {0xc64, 0x5116848b}, {0xc68, 0x47c00bff}, + {0xc6c, 0x00000036}, {0xc70, 0x2c46000d}, + {0xc74, 0x018610db}, {0xc78, 0x0000001f}, + {0xc7c, 0x00b91612}, {0xc80, 0x24000090}, + {0xc84, 0x20f60000}, {0xc88, 0x24000090}, + {0xc8c, 0x20200000}, {0xc90, 0x00121820}, + {0xc94, 0x00000000}, {0xc98, 0x00121820}, + {0xc9c, 0x00007f7f}, {0xca0, 0x00000000}, + {0xca4, 0x00000080}, {0xca8, 0x00000000}, + {0xcac, 0x00000000}, {0xcb0, 0x00000000}, + {0xcb4, 0x00000000}, {0xcb8, 
0x00000000}, + {0xcbc, 0x28000000}, {0xcc0, 0x00000000}, + {0xcc4, 0x00000000}, {0xcc8, 0x00000000}, + {0xccc, 0x00000000}, {0xcd0, 0x00000000}, + {0xcd4, 0x00000000}, {0xcd8, 0x64b22427}, + {0xcdc, 0x00766932}, {0xce0, 0x00222222}, + {0xce4, 0x00000000}, {0xce8, 0x37644302}, + {0xcec, 0x2f97d40c}, {0xd00, 0x00080740}, + {0xd04, 0x00020401}, {0xd08, 0x0000907f}, + {0xd0c, 0x20010201}, {0xd10, 0xa0633333}, + {0xd14, 0x3333bc43}, {0xd18, 0x7a8f5b6b}, + {0xd2c, 0xcc979975}, {0xd30, 0x00000000}, + {0xd34, 0x80608000}, {0xd38, 0x00000000}, + {0xd3c, 0x00027293}, {0xd40, 0x00000000}, + {0xd44, 0x00000000}, {0xd48, 0x00000000}, + {0xd4c, 0x00000000}, {0xd50, 0x6437140a}, + {0xd54, 0x00000000}, {0xd58, 0x00000000}, + {0xd5c, 0x30032064}, {0xd60, 0x4653de68}, + {0xd64, 0x04518a3c}, {0xd68, 0x00002101}, + {0xd6c, 0x2a201c16}, {0xd70, 0x1812362e}, + {0xd74, 0x322c2220}, {0xd78, 0x000e3c24}, + {0xe00, 0x24242424}, {0xe04, 0x24242424}, + {0xe08, 0x03902024}, {0xe10, 0x24242424}, + {0xe14, 0x24242424}, {0xe18, 0x24242424}, + {0xe1c, 0x24242424}, {0xe28, 0x00000000}, + {0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f}, + {0xe38, 0x02140102}, {0xe3c, 0x681604c2}, + {0xe40, 0x01007c00}, {0xe44, 0x01004800}, + {0xe48, 0xfb000000}, {0xe4c, 0x000028d1}, + {0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f}, + {0xe58, 0x02140102}, {0xe5c, 0x28160d05}, + {0xe60, 0x00000008}, {0xe68, 0x001b25a4}, + {0xe6c, 0x631b25a0}, {0xe70, 0x631b25a0}, + {0xe74, 0x081b25a0}, {0xe78, 0x081b25a0}, + {0xe7c, 0x081b25a0}, {0xe80, 0x081b25a0}, + {0xe84, 0x631b25a0}, {0xe88, 0x081b25a0}, + {0xe8c, 0x631b25a0}, {0xed0, 0x631b25a0}, + {0xed4, 0x631b25a0}, {0xed8, 0x631b25a0}, + {0xedc, 0x001b25a0}, {0xee0, 0x001b25a0}, + {0xeec, 0x6b1b25a0}, {0xee8, 0x31555448}, + {0xf14, 0x00000003}, {0xf4c, 0x00000000}, + {0xf00, 0x00000300}, + {0xffff, 0xffffffff}, +}; + +static struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = { + {0xc78, 0x7b000001}, {0xc78, 0x7b010001}, + {0xc78, 0x7b020001}, {0xc78, 0x7b030001}, + {0xc78, 0x7b040001}, {0xc78, 0x7b050001}, + {0xc78, 0x7a060001}, {0xc78, 0x79070001}, + {0xc78, 0x78080001}, {0xc78, 0x77090001}, + {0xc78, 0x760a0001}, {0xc78, 0x750b0001}, + {0xc78, 0x740c0001}, {0xc78, 0x730d0001}, + {0xc78, 0x720e0001}, {0xc78, 0x710f0001}, + {0xc78, 0x70100001}, {0xc78, 0x6f110001}, + {0xc78, 0x6e120001}, {0xc78, 0x6d130001}, + {0xc78, 0x6c140001}, {0xc78, 0x6b150001}, + {0xc78, 0x6a160001}, {0xc78, 0x69170001}, + {0xc78, 0x68180001}, {0xc78, 0x67190001}, + {0xc78, 0x661a0001}, {0xc78, 0x651b0001}, + {0xc78, 0x641c0001}, {0xc78, 0x631d0001}, + {0xc78, 0x621e0001}, {0xc78, 0x611f0001}, + {0xc78, 0x60200001}, {0xc78, 0x49210001}, + {0xc78, 0x48220001}, {0xc78, 0x47230001}, + {0xc78, 0x46240001}, {0xc78, 0x45250001}, + {0xc78, 0x44260001}, {0xc78, 0x43270001}, + {0xc78, 0x42280001}, {0xc78, 0x41290001}, + {0xc78, 0x402a0001}, {0xc78, 0x262b0001}, + {0xc78, 0x252c0001}, {0xc78, 0x242d0001}, + {0xc78, 0x232e0001}, {0xc78, 0x222f0001}, + {0xc78, 0x21300001}, {0xc78, 0x20310001}, + {0xc78, 0x06320001}, {0xc78, 0x05330001}, + {0xc78, 0x04340001}, {0xc78, 0x03350001}, + {0xc78, 0x02360001}, {0xc78, 0x01370001}, + {0xc78, 0x00380001}, {0xc78, 0x00390001}, + {0xc78, 0x003a0001}, {0xc78, 0x003b0001}, + {0xc78, 0x003c0001}, {0xc78, 0x003d0001}, + {0xc78, 0x003e0001}, {0xc78, 0x003f0001}, + {0xc78, 0x7b400001}, {0xc78, 0x7b410001}, + {0xc78, 0x7b420001}, {0xc78, 0x7b430001}, + {0xc78, 0x7b440001}, {0xc78, 0x7b450001}, + {0xc78, 0x7a460001}, {0xc78, 0x79470001}, + {0xc78, 0x78480001}, {0xc78, 0x77490001}, + {0xc78, 0x764a0001}, {0xc78, 
0x754b0001}, + {0xc78, 0x744c0001}, {0xc78, 0x734d0001}, + {0xc78, 0x724e0001}, {0xc78, 0x714f0001}, + {0xc78, 0x70500001}, {0xc78, 0x6f510001}, + {0xc78, 0x6e520001}, {0xc78, 0x6d530001}, + {0xc78, 0x6c540001}, {0xc78, 0x6b550001}, + {0xc78, 0x6a560001}, {0xc78, 0x69570001}, + {0xc78, 0x68580001}, {0xc78, 0x67590001}, + {0xc78, 0x665a0001}, {0xc78, 0x655b0001}, + {0xc78, 0x645c0001}, {0xc78, 0x635d0001}, + {0xc78, 0x625e0001}, {0xc78, 0x615f0001}, + {0xc78, 0x60600001}, {0xc78, 0x49610001}, + {0xc78, 0x48620001}, {0xc78, 0x47630001}, + {0xc78, 0x46640001}, {0xc78, 0x45650001}, + {0xc78, 0x44660001}, {0xc78, 0x43670001}, + {0xc78, 0x42680001}, {0xc78, 0x41690001}, + {0xc78, 0x406a0001}, {0xc78, 0x266b0001}, + {0xc78, 0x256c0001}, {0xc78, 0x246d0001}, + {0xc78, 0x236e0001}, {0xc78, 0x226f0001}, + {0xc78, 0x21700001}, {0xc78, 0x20710001}, + {0xc78, 0x06720001}, {0xc78, 0x05730001}, + {0xc78, 0x04740001}, {0xc78, 0x03750001}, + {0xc78, 0x02760001}, {0xc78, 0x01770001}, + {0xc78, 0x00780001}, {0xc78, 0x00790001}, + {0xc78, 0x007a0001}, {0xc78, 0x007b0001}, + {0xc78, 0x007c0001}, {0xc78, 0x007d0001}, + {0xc78, 0x007e0001}, {0xc78, 0x007f0001}, + {0xc78, 0x3800001e}, {0xc78, 0x3801001e}, + {0xc78, 0x3802001e}, {0xc78, 0x3803001e}, + {0xc78, 0x3804001e}, {0xc78, 0x3805001e}, + {0xc78, 0x3806001e}, {0xc78, 0x3807001e}, + {0xc78, 0x3808001e}, {0xc78, 0x3c09001e}, + {0xc78, 0x3e0a001e}, {0xc78, 0x400b001e}, + {0xc78, 0x440c001e}, {0xc78, 0x480d001e}, + {0xc78, 0x4c0e001e}, {0xc78, 0x500f001e}, + {0xc78, 0x5210001e}, {0xc78, 0x5611001e}, + {0xc78, 0x5a12001e}, {0xc78, 0x5e13001e}, + {0xc78, 0x6014001e}, {0xc78, 0x6015001e}, + {0xc78, 0x6016001e}, {0xc78, 0x6217001e}, + {0xc78, 0x6218001e}, {0xc78, 0x6219001e}, + {0xc78, 0x621a001e}, {0xc78, 0x621b001e}, + {0xc78, 0x621c001e}, {0xc78, 0x621d001e}, + {0xc78, 0x621e001e}, {0xc78, 0x621f001e}, + {0xffff, 0xffffffff} +}; + +static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = { + {0xc78, 0x7b000001}, {0xc78, 0x7b010001}, + {0xc78, 0x7b020001}, {0xc78, 0x7b030001}, + {0xc78, 0x7b040001}, {0xc78, 0x7b050001}, + {0xc78, 0x7b060001}, {0xc78, 0x7b070001}, + {0xc78, 0x7b080001}, {0xc78, 0x7a090001}, + {0xc78, 0x790a0001}, {0xc78, 0x780b0001}, + {0xc78, 0x770c0001}, {0xc78, 0x760d0001}, + {0xc78, 0x750e0001}, {0xc78, 0x740f0001}, + {0xc78, 0x73100001}, {0xc78, 0x72110001}, + {0xc78, 0x71120001}, {0xc78, 0x70130001}, + {0xc78, 0x6f140001}, {0xc78, 0x6e150001}, + {0xc78, 0x6d160001}, {0xc78, 0x6c170001}, + {0xc78, 0x6b180001}, {0xc78, 0x6a190001}, + {0xc78, 0x691a0001}, {0xc78, 0x681b0001}, + {0xc78, 0x671c0001}, {0xc78, 0x661d0001}, + {0xc78, 0x651e0001}, {0xc78, 0x641f0001}, + {0xc78, 0x63200001}, {0xc78, 0x62210001}, + {0xc78, 0x61220001}, {0xc78, 0x60230001}, + {0xc78, 0x46240001}, {0xc78, 0x45250001}, + {0xc78, 0x44260001}, {0xc78, 0x43270001}, + {0xc78, 0x42280001}, {0xc78, 0x41290001}, + {0xc78, 0x402a0001}, {0xc78, 0x262b0001}, + {0xc78, 0x252c0001}, {0xc78, 0x242d0001}, + {0xc78, 0x232e0001}, {0xc78, 0x222f0001}, + {0xc78, 0x21300001}, {0xc78, 0x20310001}, + {0xc78, 0x06320001}, {0xc78, 0x05330001}, + {0xc78, 0x04340001}, {0xc78, 0x03350001}, + {0xc78, 0x02360001}, {0xc78, 0x01370001}, + {0xc78, 0x00380001}, {0xc78, 0x00390001}, + {0xc78, 0x003a0001}, {0xc78, 0x003b0001}, + {0xc78, 0x003c0001}, {0xc78, 0x003d0001}, + {0xc78, 0x003e0001}, {0xc78, 0x003f0001}, + {0xc78, 0x7b400001}, {0xc78, 0x7b410001}, + {0xc78, 0x7b420001}, {0xc78, 0x7b430001}, + {0xc78, 0x7b440001}, {0xc78, 0x7b450001}, + {0xc78, 0x7b460001}, {0xc78, 0x7b470001}, + {0xc78, 
0x7b480001}, {0xc78, 0x7a490001}, + {0xc78, 0x794a0001}, {0xc78, 0x784b0001}, + {0xc78, 0x774c0001}, {0xc78, 0x764d0001}, + {0xc78, 0x754e0001}, {0xc78, 0x744f0001}, + {0xc78, 0x73500001}, {0xc78, 0x72510001}, + {0xc78, 0x71520001}, {0xc78, 0x70530001}, + {0xc78, 0x6f540001}, {0xc78, 0x6e550001}, + {0xc78, 0x6d560001}, {0xc78, 0x6c570001}, + {0xc78, 0x6b580001}, {0xc78, 0x6a590001}, + {0xc78, 0x695a0001}, {0xc78, 0x685b0001}, + {0xc78, 0x675c0001}, {0xc78, 0x665d0001}, + {0xc78, 0x655e0001}, {0xc78, 0x645f0001}, + {0xc78, 0x63600001}, {0xc78, 0x62610001}, + {0xc78, 0x61620001}, {0xc78, 0x60630001}, + {0xc78, 0x46640001}, {0xc78, 0x45650001}, + {0xc78, 0x44660001}, {0xc78, 0x43670001}, + {0xc78, 0x42680001}, {0xc78, 0x41690001}, + {0xc78, 0x406a0001}, {0xc78, 0x266b0001}, + {0xc78, 0x256c0001}, {0xc78, 0x246d0001}, + {0xc78, 0x236e0001}, {0xc78, 0x226f0001}, + {0xc78, 0x21700001}, {0xc78, 0x20710001}, + {0xc78, 0x06720001}, {0xc78, 0x05730001}, + {0xc78, 0x04740001}, {0xc78, 0x03750001}, + {0xc78, 0x02760001}, {0xc78, 0x01770001}, + {0xc78, 0x00780001}, {0xc78, 0x00790001}, + {0xc78, 0x007a0001}, {0xc78, 0x007b0001}, + {0xc78, 0x007c0001}, {0xc78, 0x007d0001}, + {0xc78, 0x007e0001}, {0xc78, 0x007f0001}, + {0xc78, 0x3800001e}, {0xc78, 0x3801001e}, + {0xc78, 0x3802001e}, {0xc78, 0x3803001e}, + {0xc78, 0x3804001e}, {0xc78, 0x3805001e}, + {0xc78, 0x3806001e}, {0xc78, 0x3807001e}, + {0xc78, 0x3808001e}, {0xc78, 0x3c09001e}, + {0xc78, 0x3e0a001e}, {0xc78, 0x400b001e}, + {0xc78, 0x440c001e}, {0xc78, 0x480d001e}, + {0xc78, 0x4c0e001e}, {0xc78, 0x500f001e}, + {0xc78, 0x5210001e}, {0xc78, 0x5611001e}, + {0xc78, 0x5a12001e}, {0xc78, 0x5e13001e}, + {0xc78, 0x6014001e}, {0xc78, 0x6015001e}, + {0xc78, 0x6016001e}, {0xc78, 0x6217001e}, + {0xc78, 0x6218001e}, {0xc78, 0x6219001e}, + {0xc78, 0x621a001e}, {0xc78, 0x621b001e}, + {0xc78, 0x621c001e}, {0xc78, 0x621d001e}, + {0xc78, 0x621e001e}, {0xc78, 0x621f001e}, + {0xffff, 0xffffffff} +}; + +static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = { + {0x00, 0x00030159}, {0x01, 0x00031284}, + {0x02, 0x00098000}, {0x03, 0x00039c63}, + {0x04, 0x000210e7}, {0x09, 0x0002044f}, + {0x0a, 0x0001a3f1}, {0x0b, 0x00014787}, + {0x0c, 0x000896fe}, {0x0d, 0x0000e02c}, + {0x0e, 0x00039ce7}, {0x0f, 0x00000451}, + {0x19, 0x00000000}, {0x1a, 0x00030355}, + {0x1b, 0x00060a00}, {0x1c, 0x000fc378}, + {0x1d, 0x000a1250}, {0x1e, 0x0000024f}, + {0x1f, 0x00000000}, {0x20, 0x0000b614}, + {0x21, 0x0006c000}, {0x22, 0x00000000}, + {0x23, 0x00001558}, {0x24, 0x00000060}, + {0x25, 0x00000483}, {0x26, 0x0004f000}, + {0x27, 0x000ec7d9}, {0x28, 0x00057730}, + {0x29, 0x00004783}, {0x2a, 0x00000001}, + {0x2b, 0x00021334}, {0x2a, 0x00000000}, + {0x2b, 0x00000054}, {0x2a, 0x00000001}, + {0x2b, 0x00000808}, {0x2b, 0x00053333}, + {0x2c, 0x0000000c}, {0x2a, 0x00000002}, + {0x2b, 0x00000808}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000003}, + {0x2b, 0x00000808}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000004}, + {0x2b, 0x00000808}, {0x2b, 0x0006b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000005}, + {0x2b, 0x00000808}, {0x2b, 0x00073333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000006}, + {0x2b, 0x00000709}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000007}, + {0x2b, 0x00000709}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000008}, + {0x2b, 0x0000060a}, {0x2b, 0x0004b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000009}, + {0x2b, 0x0000060a}, {0x2b, 0x00053333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000a}, + {0x2b, 0x0000060a}, {0x2b, 0x0005b333}, + {0x2c, 
0x0000000d}, {0x2a, 0x0000000b}, + {0x2b, 0x0000060a}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000c}, + {0x2b, 0x0000060a}, {0x2b, 0x0006b333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000d}, + {0x2b, 0x0000060a}, {0x2b, 0x00073333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000e}, + {0x2b, 0x0000050b}, {0x2b, 0x00066666}, + {0x2c, 0x0000001a}, {0x2a, 0x000e0000}, + {0x10, 0x0004000f}, {0x11, 0x000e31fc}, + {0x10, 0x0006000f}, {0x11, 0x000ff9f8}, + {0x10, 0x0002000f}, {0x11, 0x000203f9}, + {0x10, 0x0003000f}, {0x11, 0x000ff500}, + {0x10, 0x00000000}, {0x11, 0x00000000}, + {0x10, 0x0008000f}, {0x11, 0x0003f100}, + {0x10, 0x0009000f}, {0x11, 0x00023100}, + {0x12, 0x00032000}, {0x12, 0x00071000}, + {0x12, 0x000b0000}, {0x12, 0x000fc000}, + {0x13, 0x000287b3}, {0x13, 0x000244b7}, + {0x13, 0x000204ab}, {0x13, 0x0001c49f}, + {0x13, 0x00018493}, {0x13, 0x0001429b}, + {0x13, 0x00010299}, {0x13, 0x0000c29c}, + {0x13, 0x000081a0}, {0x13, 0x000040ac}, + {0x13, 0x00000020}, {0x14, 0x0001944c}, + {0x14, 0x00059444}, {0x14, 0x0009944c}, + {0x14, 0x000d9444}, {0x15, 0x0000f474}, + {0x15, 0x0004f477}, {0x15, 0x0008f455}, + {0x15, 0x000cf455}, {0x16, 0x00000339}, + {0x16, 0x00040339}, {0x16, 0x00080339}, + {0x16, 0x000c0366}, {0x00, 0x00010159}, + {0x18, 0x0000f401}, {0xfe, 0x00000000}, + {0xfe, 0x00000000}, {0x1f, 0x00000003}, + {0xfe, 0x00000000}, {0xfe, 0x00000000}, + {0x1e, 0x00000247}, {0x1f, 0x00000000}, + {0x00, 0x00030159}, + {0xff, 0xffffffff} +}; + +static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = { + {0x00, 0x00030159}, {0x01, 0x00031284}, + {0x02, 0x00098000}, {0x03, 0x00018c63}, + {0x04, 0x000210e7}, {0x09, 0x0002044f}, + {0x0a, 0x0001adb1}, {0x0b, 0x00054867}, + {0x0c, 0x0008992e}, {0x0d, 0x0000e52c}, + {0x0e, 0x00039ce7}, {0x0f, 0x00000451}, + {0x19, 0x00000000}, {0x1a, 0x00010255}, + {0x1b, 0x00060a00}, {0x1c, 0x000fc378}, + {0x1d, 0x000a1250}, {0x1e, 0x0004445f}, + {0x1f, 0x00080001}, {0x20, 0x0000b614}, + {0x21, 0x0006c000}, {0x22, 0x00000000}, + {0x23, 0x00001558}, {0x24, 0x00000060}, + {0x25, 0x00000483}, {0x26, 0x0004f000}, + {0x27, 0x000ec7d9}, {0x28, 0x000577c0}, + {0x29, 0x00004783}, {0x2a, 0x00000001}, + {0x2b, 0x00021334}, {0x2a, 0x00000000}, + {0x2b, 0x00000054}, {0x2a, 0x00000001}, + {0x2b, 0x00000808}, {0x2b, 0x00053333}, + {0x2c, 0x0000000c}, {0x2a, 0x00000002}, + {0x2b, 0x00000808}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000003}, + {0x2b, 0x00000808}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000004}, + {0x2b, 0x00000808}, {0x2b, 0x0006b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000005}, + {0x2b, 0x00000808}, {0x2b, 0x00073333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000006}, + {0x2b, 0x00000709}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000007}, + {0x2b, 0x00000709}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000008}, + {0x2b, 0x0000060a}, {0x2b, 0x0004b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000009}, + {0x2b, 0x0000060a}, {0x2b, 0x00053333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000a}, + {0x2b, 0x0000060a}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000b}, + {0x2b, 0x0000060a}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000c}, + {0x2b, 0x0000060a}, {0x2b, 0x0006b333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000d}, + {0x2b, 0x0000060a}, {0x2b, 0x00073333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000e}, + {0x2b, 0x0000050b}, {0x2b, 0x00066666}, + {0x2c, 0x0000001a}, {0x2a, 0x000e0000}, + {0x10, 0x0004000f}, {0x11, 0x000e31fc}, + {0x10, 0x0006000f}, {0x11, 0x000ff9f8}, + {0x10, 0x0002000f}, {0x11, 0x000203f9}, 
+ {0x10, 0x0003000f}, {0x11, 0x000ff500}, + {0x10, 0x00000000}, {0x11, 0x00000000}, + {0x10, 0x0008000f}, {0x11, 0x0003f100}, + {0x10, 0x0009000f}, {0x11, 0x00023100}, + {0x12, 0x00032000}, {0x12, 0x00071000}, + {0x12, 0x000b0000}, {0x12, 0x000fc000}, + {0x13, 0x000287b3}, {0x13, 0x000244b7}, + {0x13, 0x000204ab}, {0x13, 0x0001c49f}, + {0x13, 0x00018493}, {0x13, 0x0001429b}, + {0x13, 0x00010299}, {0x13, 0x0000c29c}, + {0x13, 0x000081a0}, {0x13, 0x000040ac}, + {0x13, 0x00000020}, {0x14, 0x0001944c}, + {0x14, 0x00059444}, {0x14, 0x0009944c}, + {0x14, 0x000d9444}, {0x15, 0x0000f424}, + {0x15, 0x0004f424}, {0x15, 0x0008f424}, + {0x15, 0x000cf424}, {0x16, 0x000e0330}, + {0x16, 0x000a0330}, {0x16, 0x00060330}, + {0x16, 0x00020330}, {0x00, 0x00010159}, + {0x18, 0x0000f401}, {0xfe, 0x00000000}, + {0xfe, 0x00000000}, {0x1f, 0x00080003}, + {0xfe, 0x00000000}, {0xfe, 0x00000000}, + {0x1e, 0x00044457}, {0x1f, 0x00080000}, + {0x00, 0x00030159}, + {0xff, 0xffffffff} +}; + +static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = { + {0x00, 0x00030159}, {0x01, 0x00031284}, + {0x02, 0x00098000}, {0x03, 0x00018c63}, + {0x04, 0x000210e7}, {0x09, 0x0002044f}, + {0x0a, 0x0001adb1}, {0x0b, 0x00054867}, + {0x0c, 0x0008992e}, {0x0d, 0x0000e52c}, + {0x0e, 0x00039ce7}, {0x0f, 0x00000451}, + {0x12, 0x00032000}, {0x12, 0x00071000}, + {0x12, 0x000b0000}, {0x12, 0x000fc000}, + {0x13, 0x000287af}, {0x13, 0x000244b7}, + {0x13, 0x000204ab}, {0x13, 0x0001c49f}, + {0x13, 0x00018493}, {0x13, 0x00014297}, + {0x13, 0x00010295}, {0x13, 0x0000c298}, + {0x13, 0x0000819c}, {0x13, 0x000040a8}, + {0x13, 0x0000001c}, {0x14, 0x0001944c}, + {0x14, 0x00059444}, {0x14, 0x0009944c}, + {0x14, 0x000d9444}, {0x15, 0x0000f424}, + {0x15, 0x0004f424}, {0x15, 0x0008f424}, + {0x15, 0x000cf424}, {0x16, 0x000e0330}, + {0x16, 0x000a0330}, {0x16, 0x00060330}, + {0x16, 0x00020330}, + {0xff, 0xffffffff} +}; + +static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = { + {0x00, 0x00030159}, {0x01, 0x00031284}, + {0x02, 0x00098000}, {0x03, 0x00018c63}, + {0x04, 0x000210e7}, {0x09, 0x0002044f}, + {0x0a, 0x0001adb1}, {0x0b, 0x00054867}, + {0x0c, 0x0008992e}, {0x0d, 0x0000e52c}, + {0x0e, 0x00039ce7}, {0x0f, 0x00000451}, + {0x19, 0x00000000}, {0x1a, 0x00010255}, + {0x1b, 0x00060a00}, {0x1c, 0x000fc378}, + {0x1d, 0x000a1250}, {0x1e, 0x0004445f}, + {0x1f, 0x00080001}, {0x20, 0x0000b614}, + {0x21, 0x0006c000}, {0x22, 0x00000000}, + {0x23, 0x00001558}, {0x24, 0x00000060}, + {0x25, 0x00000483}, {0x26, 0x0004f000}, + {0x27, 0x000ec7d9}, {0x28, 0x000577c0}, + {0x29, 0x00004783}, {0x2a, 0x00000001}, + {0x2b, 0x00021334}, {0x2a, 0x00000000}, + {0x2b, 0x00000054}, {0x2a, 0x00000001}, + {0x2b, 0x00000808}, {0x2b, 0x00053333}, + {0x2c, 0x0000000c}, {0x2a, 0x00000002}, + {0x2b, 0x00000808}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000003}, + {0x2b, 0x00000808}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000004}, + {0x2b, 0x00000808}, {0x2b, 0x0006b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000005}, + {0x2b, 0x00000808}, {0x2b, 0x00073333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000006}, + {0x2b, 0x00000709}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000007}, + {0x2b, 0x00000709}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000008}, + {0x2b, 0x0000060a}, {0x2b, 0x0004b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000009}, + {0x2b, 0x0000060a}, {0x2b, 0x00053333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000a}, + {0x2b, 0x0000060a}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000b}, + {0x2b, 0x0000060a}, {0x2b, 
0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000c}, + {0x2b, 0x0000060a}, {0x2b, 0x0006b333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000d}, + {0x2b, 0x0000060a}, {0x2b, 0x00073333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000e}, + {0x2b, 0x0000050b}, {0x2b, 0x00066666}, + {0x2c, 0x0000001a}, {0x2a, 0x000e0000}, + {0x10, 0x0004000f}, {0x11, 0x000e31fc}, + {0x10, 0x0006000f}, {0x11, 0x000ff9f8}, + {0x10, 0x0002000f}, {0x11, 0x000203f9}, + {0x10, 0x0003000f}, {0x11, 0x000ff500}, + {0x10, 0x00000000}, {0x11, 0x00000000}, + {0x10, 0x0008000f}, {0x11, 0x0003f100}, + {0x10, 0x0009000f}, {0x11, 0x00023100}, + {0x12, 0x00032000}, {0x12, 0x00071000}, + {0x12, 0x000b0000}, {0x12, 0x000fc000}, + {0x13, 0x000287b3}, {0x13, 0x000244b7}, + {0x13, 0x000204ab}, {0x13, 0x0001c49f}, + {0x13, 0x00018493}, {0x13, 0x0001429b}, + {0x13, 0x00010299}, {0x13, 0x0000c29c}, + {0x13, 0x000081a0}, {0x13, 0x000040ac}, + {0x13, 0x00000020}, {0x14, 0x0001944c}, + {0x14, 0x00059444}, {0x14, 0x0009944c}, + {0x14, 0x000d9444}, {0x15, 0x0000f405}, + {0x15, 0x0004f405}, {0x15, 0x0008f405}, + {0x15, 0x000cf405}, {0x16, 0x000e0330}, + {0x16, 0x000a0330}, {0x16, 0x00060330}, + {0x16, 0x00020330}, {0x00, 0x00010159}, + {0x18, 0x0000f401}, {0xfe, 0x00000000}, + {0xfe, 0x00000000}, {0x1f, 0x00080003}, + {0xfe, 0x00000000}, {0xfe, 0x00000000}, + {0x1e, 0x00044457}, {0x1f, 0x00080000}, + {0x00, 0x00030159}, + {0xff, 0xffffffff} +}; + +static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = { + {0x00, 0x00030159}, {0x01, 0x00031284}, + {0x02, 0x00098000}, {0x03, 0x00018c63}, + {0x04, 0x000210e7}, {0x09, 0x0002044f}, + {0x0a, 0x0001adb0}, {0x0b, 0x00054867}, + {0x0c, 0x0008992e}, {0x0d, 0x0000e529}, + {0x0e, 0x00039ce7}, {0x0f, 0x00000451}, + {0x19, 0x00000000}, {0x1a, 0x00000255}, + {0x1b, 0x00060a00}, {0x1c, 0x000fc378}, + {0x1d, 0x000a1250}, {0x1e, 0x0004445f}, + {0x1f, 0x00080001}, {0x20, 0x0000b614}, + {0x21, 0x0006c000}, {0x22, 0x0000083c}, + {0x23, 0x00001558}, {0x24, 0x00000060}, + {0x25, 0x00000483}, {0x26, 0x0004f000}, + {0x27, 0x000ec7d9}, {0x28, 0x000977c0}, + {0x29, 0x00004783}, {0x2a, 0x00000001}, + {0x2b, 0x00021334}, {0x2a, 0x00000000}, + {0x2b, 0x00000054}, {0x2a, 0x00000001}, + {0x2b, 0x00000808}, {0x2b, 0x00053333}, + {0x2c, 0x0000000c}, {0x2a, 0x00000002}, + {0x2b, 0x00000808}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000003}, + {0x2b, 0x00000808}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000004}, + {0x2b, 0x00000808}, {0x2b, 0x0006b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000005}, + {0x2b, 0x00000808}, {0x2b, 0x00073333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000006}, + {0x2b, 0x00000709}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000007}, + {0x2b, 0x00000709}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000008}, + {0x2b, 0x0000060a}, {0x2b, 0x0004b333}, + {0x2c, 0x0000000d}, {0x2a, 0x00000009}, + {0x2b, 0x0000060a}, {0x2b, 0x00053333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000a}, + {0x2b, 0x0000060a}, {0x2b, 0x0005b333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000b}, + {0x2b, 0x0000060a}, {0x2b, 0x00063333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000c}, + {0x2b, 0x0000060a}, {0x2b, 0x0006b333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000d}, + {0x2b, 0x0000060a}, {0x2b, 0x00073333}, + {0x2c, 0x0000000d}, {0x2a, 0x0000000e}, + {0x2b, 0x0000050b}, {0x2b, 0x00066666}, + {0x2c, 0x0000001a}, {0x2a, 0x000e0000}, + {0x10, 0x0004000f}, {0x11, 0x000e31fc}, + {0x10, 0x0006000f}, {0x11, 0x000ff9f8}, + {0x10, 0x0002000f}, {0x11, 0x000203f9}, + {0x10, 0x0003000f}, {0x11, 0x000ff500}, + {0x10, 
0x00000000}, {0x11, 0x00000000}, + {0x10, 0x0008000f}, {0x11, 0x0003f100}, + {0x10, 0x0009000f}, {0x11, 0x00023100}, + {0x12, 0x000d8000}, {0x12, 0x00090000}, + {0x12, 0x00051000}, {0x12, 0x00012000}, + {0x13, 0x00028fb4}, {0x13, 0x00024fa8}, + {0x13, 0x000207a4}, {0x13, 0x0001c3b0}, + {0x13, 0x000183a4}, {0x13, 0x00014398}, + {0x13, 0x000101a4}, {0x13, 0x0000c198}, + {0x13, 0x000080a4}, {0x13, 0x00004098}, + {0x13, 0x00000000}, {0x14, 0x0001944c}, + {0x14, 0x00059444}, {0x14, 0x0009944c}, + {0x14, 0x000d9444}, {0x15, 0x0000f405}, + {0x15, 0x0004f405}, {0x15, 0x0008f405}, + {0x15, 0x000cf405}, {0x16, 0x000e0330}, + {0x16, 0x000a0330}, {0x16, 0x00060330}, + {0x16, 0x00020330}, {0x00, 0x00010159}, + {0x18, 0x0000f401}, {0xfe, 0x00000000}, + {0xfe, 0x00000000}, {0x1f, 0x00080003}, + {0xfe, 0x00000000}, {0xfe, 0x00000000}, + {0x1e, 0x00044457}, {0x1f, 0x00080000}, + {0x00, 0x00030159}, + {0xff, 0xffffffff} +}; + +static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = { + { /* RF_A */ + .hssiparm1 = REG_FPGA0_XA_HSSI_PARM1, + .hssiparm2 = REG_FPGA0_XA_HSSI_PARM2, + .lssiparm = REG_FPGA0_XA_LSSI_PARM, + .hspiread = REG_HSPI_XA_READBACK, + .lssiread = REG_FPGA0_XA_LSSI_READBACK, + .rf_sw_ctrl = REG_FPGA0_XA_RF_SW_CTRL, + }, + { /* RF_B */ + .hssiparm1 = REG_FPGA0_XB_HSSI_PARM1, + .hssiparm2 = REG_FPGA0_XB_HSSI_PARM2, + .lssiparm = REG_FPGA0_XB_LSSI_PARM, + .hspiread = REG_HSPI_XB_READBACK, + .lssiread = REG_FPGA0_XB_LSSI_READBACK, + .rf_sw_ctrl = REG_FPGA0_XB_RF_SW_CTRL, + }, +}; + +static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = { + REG_OFDM0_XA_RX_IQ_IMBALANCE, + REG_OFDM0_XB_RX_IQ_IMBALANCE, + REG_OFDM0_ENERGY_CCA_THRES, + REG_OFDM0_AGCR_SSI_TABLE, + REG_OFDM0_XA_TX_IQ_IMBALANCE, + REG_OFDM0_XB_TX_IQ_IMBALANCE, + REG_OFDM0_XC_TX_AFE, + REG_OFDM0_XD_TX_AFE, + REG_OFDM0_RX_IQ_EXT_ANTA +}; + +static u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr) +{ + struct usb_device *udev = priv->udev; + int len; + u8 data; + + mutex_lock(&priv->usb_buf_mutex); + len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + REALTEK_USB_CMD_REQ, REALTEK_USB_READ, + addr, 0, &priv->usb_buf.val8, sizeof(u8), + RTW_USB_CONTROL_MSG_TIMEOUT); + data = priv->usb_buf.val8; + mutex_unlock(&priv->usb_buf_mutex); + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ) + dev_info(&udev->dev, "%s(%04x) = 0x%02x, len %i\n", + __func__, addr, data, len); + return data; +} + +static u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr) +{ + struct usb_device *udev = priv->udev; + int len; + u16 data; + + mutex_lock(&priv->usb_buf_mutex); + len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + REALTEK_USB_CMD_REQ, REALTEK_USB_READ, + addr, 0, &priv->usb_buf.val16, sizeof(u16), + RTW_USB_CONTROL_MSG_TIMEOUT); + data = le16_to_cpu(priv->usb_buf.val16); + mutex_unlock(&priv->usb_buf_mutex); + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ) + dev_info(&udev->dev, "%s(%04x) = 0x%04x, len %i\n", + __func__, addr, data, len); + return data; +} + +static u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr) +{ + struct usb_device *udev = priv->udev; + int len; + u32 data; + + mutex_lock(&priv->usb_buf_mutex); + len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + REALTEK_USB_CMD_REQ, REALTEK_USB_READ, + addr, 0, &priv->usb_buf.val32, sizeof(u32), + RTW_USB_CONTROL_MSG_TIMEOUT); + data = le32_to_cpu(priv->usb_buf.val32); + mutex_unlock(&priv->usb_buf_mutex); + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ) + dev_info(&udev->dev, "%s(%04x) = 0x%08x, len %i\n", + __func__, addr, data, len); + return data; +} + 
+static int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val) +{ + struct usb_device *udev = priv->udev; + int ret; + + mutex_lock(&priv->usb_buf_mutex); + priv->usb_buf.val8 = val; + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE, + addr, 0, &priv->usb_buf.val8, sizeof(u8), + RTW_USB_CONTROL_MSG_TIMEOUT); + + mutex_unlock(&priv->usb_buf_mutex); + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE) + dev_info(&udev->dev, "%s(%04x) = 0x%02x\n", + __func__, addr, val); + return ret; +} + +static int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val) +{ + struct usb_device *udev = priv->udev; + int ret; + + mutex_lock(&priv->usb_buf_mutex); + priv->usb_buf.val16 = cpu_to_le16(val); + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE, + addr, 0, &priv->usb_buf.val16, sizeof(u16), + RTW_USB_CONTROL_MSG_TIMEOUT); + mutex_unlock(&priv->usb_buf_mutex); + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE) + dev_info(&udev->dev, "%s(%04x) = 0x%04x\n", + __func__, addr, val); + return ret; +} + +static int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val) +{ + struct usb_device *udev = priv->udev; + int ret; + + mutex_lock(&priv->usb_buf_mutex); + priv->usb_buf.val32 = cpu_to_le32(val); + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE, + addr, 0, &priv->usb_buf.val32, sizeof(u32), + RTW_USB_CONTROL_MSG_TIMEOUT); + mutex_unlock(&priv->usb_buf_mutex); + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE) + dev_info(&udev->dev, "%s(%04x) = 0x%08x\n", + __func__, addr, val); + return ret; +} + +static int +rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len) +{ + struct usb_device *udev = priv->udev; + int blocksize = priv->fops->writeN_block_size; + int ret, i, count, remainder; + + count = len / blocksize; + remainder = len % blocksize; + + for (i = 0; i < count; i++) { + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE, + addr, 0, buf, blocksize, + RTW_USB_CONTROL_MSG_TIMEOUT); + if (ret != blocksize) + goto write_error; + + addr += blocksize; + buf += blocksize; + } + + if (remainder) { + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE, + addr, 0, buf, remainder, + RTW_USB_CONTROL_MSG_TIMEOUT); + if (ret != remainder) + goto write_error; + } + + return len; + +write_error: + dev_info(&udev->dev, + "%s: Failed to write block at addr: %04x size: %04x\n", + __func__, addr, blocksize); + return -EAGAIN; +} + +static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv, + enum rtl8xxxu_rfpath path, u8 reg) +{ + u32 hssia, val32, retval; + + hssia = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2); + if (path != RF_A) + val32 = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hssiparm2); + else + val32 = hssia; + + val32 &= ~FPGA0_HSSI_PARM2_ADDR_MASK; + val32 |= (reg << FPGA0_HSSI_PARM2_ADDR_SHIFT); + val32 |= FPGA0_HSSI_PARM2_EDGE_READ; + hssia &= ~FPGA0_HSSI_PARM2_EDGE_READ; + rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM2, hssia); + + udelay(10); + + rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].hssiparm2, val32); + udelay(100); + + hssia |= FPGA0_HSSI_PARM2_EDGE_READ; + rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM2, hssia); + udelay(10); + + val32 = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hssiparm1); + if (val32 & FPGA0_HSSI_PARM1_PI) + retval = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hspiread); + else + retval = 
rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].lssiread); + + retval &= 0xfffff; + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_READ) + dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n", + __func__, reg, retval); + return retval; +} + +static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv, + enum rtl8xxxu_rfpath path, u8 reg, u32 data) +{ + int ret, retval; + u32 dataaddr; + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_WRITE) + dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n", + __func__, reg, data); + + data &= FPGA0_LSSI_PARM_DATA_MASK; + dataaddr = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) | data; + + /* Use XB for path B */ + ret = rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].lssiparm, dataaddr); + if (ret != sizeof(dataaddr)) + retval = -EIO; + else + retval = 0; + + udelay(1); + + return retval; +} + +static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c) +{ + struct device *dev = &priv->udev->dev; + int mbox_nr, retry, retval = 0; + int mbox_reg, mbox_ext_reg; + u8 val8; + + mutex_lock(&priv->h2c_mutex); + + mbox_nr = priv->next_mbox; + mbox_reg = REG_HMBOX_0 + (mbox_nr * 4); + mbox_ext_reg = REG_HMBOX_EXT_0 + (mbox_nr * 2); + + /* + * MBOX ready? + */ + retry = 100; + do { + val8 = rtl8xxxu_read8(priv, REG_HMTFR); + if (!(val8 & BIT(mbox_nr))) + break; + } while (retry--); + + if (!retry) { + dev_dbg(dev, "%s: Mailbox busy\n", __func__); + retval = -EBUSY; + goto error; + } + + /* + * Need to swap as it's being swapped again by rtl8xxxu_write16/32() + */ + if (h2c->cmd.cmd & H2C_EXT) { + rtl8xxxu_write16(priv, mbox_ext_reg, + le16_to_cpu(h2c->raw.ext)); + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C) + dev_info(dev, "H2C_EXT %04x\n", + le16_to_cpu(h2c->raw.ext)); + } + rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data)); + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C) + dev_info(dev, "H2C %08x\n", le32_to_cpu(h2c->raw.data)); + + priv->next_mbox = (mbox_nr + 1) % H2C_MAX_MBOX; + +error: + mutex_unlock(&priv->h2c_mutex); + return retval; +} + +static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u32 val32; + + val8 = rtl8xxxu_read8(priv, REG_SPS0_CTRL); + val8 |= BIT(0) | BIT(3); + rtl8xxxu_write8(priv, REG_SPS0_CTRL, val8); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_PARM); + val32 &= ~(BIT(4) | BIT(5)); + val32 |= BIT(3); + if (priv->rf_paths == 2) { + val32 &= ~(BIT(20) | BIT(21)); + val32 |= BIT(19); + } + rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_PARM, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); + val32 &= ~OFDM_RF_PATH_TX_MASK; + if (priv->tx_paths == 2) + val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B; + else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c) + val32 |= OFDM_RF_PATH_TX_B; + else + val32 |= OFDM_RF_PATH_TX_A; + rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + val32 &= ~FPGA_RF_MODE_JAPAN; + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + + if (priv->rf_paths == 2) + rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x63db25a0); + else + rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x631b25a0); + + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x32d95); + if (priv->rf_paths == 2) + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x32d95); + + rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); +} + +static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv) +{ + u8 sps0; + u32 val32; + + rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); + + sps0 = rtl8xxxu_read8(priv, REG_SPS0_CTRL); + + /* RF RX code for preamble power saving */ + val32 = 
rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_PARM); + val32 &= ~(BIT(3) | BIT(4) | BIT(5)); + if (priv->rf_paths == 2) + val32 &= ~(BIT(19) | BIT(20) | BIT(21)); + rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_PARM, val32); + + /* Disable TX for four paths */ + val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); + val32 &= ~OFDM_RF_PATH_TX_MASK; + rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32); + + /* Enable power saving */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + val32 |= FPGA_RF_MODE_JAPAN; + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + + /* AFE control register to power down bits [30:22] */ + if (priv->rf_paths == 2) + rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x00db25a0); + else + rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x001b25a0); + + /* Power down RF module */ + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0); + if (priv->rf_paths == 2) + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0); + + sps0 &= ~(BIT(0) | BIT(3)); + rtl8xxxu_write8(priv, REG_SPS0_CTRL, sps0); +} + + +static void rtl8723a_stop_tx_beacon(struct rtl8xxxu_priv *priv) +{ + u8 val8; + + val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL + 2); + val8 &= ~BIT(6); + rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL + 2, val8); + + rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 1, 0x64); + val8 = rtl8xxxu_read8(priv, REG_TBTT_PROHIBIT + 2); + val8 &= ~BIT(0); + rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 2, val8); +} + + +/* + * The rtl8723a has 3 channel groups for its efuse settings. It only + * supports the 2.4GHz band, so channels 1 - 14: + * group 0: channels 1 - 3 + * group 1: channels 4 - 9 + * group 2: channels 10 - 14 + * + * Note: We index from 0 in the code + */ +static int rtl8723a_channel_to_group(int channel) +{ + int group; + + if (channel < 4) + group = 0; + else if (channel < 10) + group = 1; + else + group = 2; + + return group; +} + +static void rtl8723au_config_channel(struct ieee80211_hw *hw) +{ + struct rtl8xxxu_priv *priv = hw->priv; + u32 val32, rsr; + u8 val8, opmode; + bool ht = true; + int sec_ch_above, channel; + int i; + + opmode = rtl8xxxu_read8(priv, REG_BW_OPMODE); + rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET); + channel = hw->conf.chandef.chan->hw_value; + + switch (hw->conf.chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + ht = false; + case NL80211_CHAN_WIDTH_20: + opmode |= BW_OPMODE_20MHZ; + rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + val32 &= ~FPGA_RF_MODE; + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE); + val32 &= ~FPGA_RF_MODE; + rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_ANALOG2); + val32 |= FPGA0_ANALOG2_20MHZ; + rtl8xxxu_write32(priv, REG_FPGA0_ANALOG2, val32); + break; + case NL80211_CHAN_WIDTH_40: + if (hw->conf.chandef.center_freq1 > + hw->conf.chandef.chan->center_freq) { + sec_ch_above = 1; + channel += 2; + } else { + sec_ch_above = 0; + channel -= 2; + } + + opmode &= ~BW_OPMODE_20MHZ; + rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode); + rsr &= ~RSR_RSC_BANDWIDTH_40M; + if (sec_ch_above) + rsr |= RSR_RSC_UPPER_SUB_CHANNEL; + else + rsr |= RSR_RSC_LOWER_SUB_CHANNEL; + rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, rsr); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + val32 |= FPGA_RF_MODE; + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE); + val32 |= FPGA_RF_MODE; + rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32); + + /* + * Set 
Control channel to upper or lower. These settings + * are required only for 40MHz + */ + val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM); + val32 &= ~CCK0_SIDEBAND; + if (!sec_ch_above) + val32 |= CCK0_SIDEBAND; + rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM1_LSTF); + val32 &= ~OFDM_LSTF_PRIME_CH_MASK; /* 0xc00 */ + if (sec_ch_above) + val32 |= OFDM_LSTF_PRIME_CH_LOW; + else + val32 |= OFDM_LSTF_PRIME_CH_HIGH; + rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_ANALOG2); + val32 &= ~FPGA0_ANALOG2_20MHZ; + rtl8xxxu_write32(priv, REG_FPGA0_ANALOG2, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE); + val32 &= ~(FPGA0_PS_LOWER_CHANNEL | FPGA0_PS_UPPER_CHANNEL); + if (sec_ch_above) + val32 |= FPGA0_PS_UPPER_CHANNEL; + else + val32 |= FPGA0_PS_LOWER_CHANNEL; + rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32); + break; + + default: + break; + } + + for (i = RF_A; i < priv->rf_paths; i++) { + val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG); + val32 &= ~MODE_AG_CHANNEL_MASK; + val32 |= channel; + rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32); + } + + if (ht) + val8 = 0x0e; + else + val8 = 0x0a; + + rtl8xxxu_write8(priv, REG_SIFS_CCK + 1, val8); + rtl8xxxu_write8(priv, REG_SIFS_OFDM + 1, val8); + + rtl8xxxu_write16(priv, REG_R2T_SIFS, 0x0808); + rtl8xxxu_write16(priv, REG_T2T_SIFS, 0x0a0a); + + for (i = RF_A; i < priv->rf_paths; i++) { + val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG); + if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) + val32 &= ~MODE_AG_CHANNEL_20MHZ; + else + val32 |= MODE_AG_CHANNEL_20MHZ; + rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32); + } +} + +static void +rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) +{ + u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS]; + u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS]; + u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b; + u8 val8; + int group, i; + + group = rtl8723a_channel_to_group(channel); + + cck[0] = priv->cck_tx_power_index_A[group]; + cck[1] = priv->cck_tx_power_index_B[group]; + + ofdm[0] = priv->ht40_1s_tx_power_index_A[group]; + ofdm[1] = priv->ht40_1s_tx_power_index_B[group]; + + ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a; + ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b; + + mcsbase[0] = ofdm[0]; + mcsbase[1] = ofdm[1]; + if (!ht40) { + mcsbase[0] += priv->ht20_tx_power_index_diff[group].a; + mcsbase[1] += priv->ht20_tx_power_index_diff[group].b; + } + + if (priv->tx_paths > 1) { + if (ofdm[0] > priv->ht40_2s_tx_power_index_diff[group].a) + ofdm[0] -= priv->ht40_2s_tx_power_index_diff[group].a; + if (ofdm[1] > priv->ht40_2s_tx_power_index_diff[group].b) + ofdm[1] -= priv->ht40_2s_tx_power_index_diff[group].b; + } + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_CHANNEL) + dev_info(&priv->udev->dev, + "%s: Setting TX power CCK A: %02x, " + "CCK B: %02x, OFDM A: %02x, OFDM B: %02x\n", + __func__, cck[0], cck[1], ofdm[0], ofdm[1]); + + for (i = 0; i < RTL8723A_MAX_RF_PATHS; i++) { + if (cck[i] > RF6052_MAX_TX_PWR) + cck[i] = RF6052_MAX_TX_PWR; + if (ofdm[i] > RF6052_MAX_TX_PWR) + ofdm[i] = RF6052_MAX_TX_PWR; + } + + val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32); + val32 &= 0xffff00ff; + val32 |= (cck[0] << 8); + rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11); + val32 &= 0xff; + val32 |= ((cck[0] << 8) | (cck[0] << 16) | (cck[0] 
<< 24)); + rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11); + val32 &= 0xffffff00; + val32 |= cck[1]; + rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK1_55_MCS32); + val32 &= 0xff; + val32 |= ((cck[1] << 8) | (cck[1] << 16) | (cck[1] << 24)); + rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK1_55_MCS32, val32); + + ofdm_a = ofdmbase[0] | ofdmbase[0] << 8 | + ofdmbase[0] << 16 | ofdmbase[0] << 24; + ofdm_b = ofdmbase[1] | ofdmbase[1] << 8 | + ofdmbase[1] << 16 | ofdmbase[1] << 24; + rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm_a); + rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm_b); + + rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm_a); + rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm_b); + + mcs_a = mcsbase[0] | mcsbase[0] << 8 | + mcsbase[0] << 16 | mcsbase[0] << 24; + mcs_b = mcsbase[1] | mcsbase[1] << 8 | + mcsbase[1] << 16 | mcsbase[1] << 24; + + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs_a); + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs_b); + + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs_a); + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs_b); + + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs_a); + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs_b); + + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a); + for (i = 0; i < 3; i++) { + if (i != 2) + val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0; + else + val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0; + rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8); + } + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b); + for (i = 0; i < 3; i++) { + if (i != 2) + val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0; + else + val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0; + rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8); + } +} + +static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv, + enum nl80211_iftype linktype) +{ + u16 val8; + + val8 = rtl8xxxu_read16(priv, REG_MSR); + val8 &= ~MSR_LINKTYPE_MASK; + + switch (linktype) { + case NL80211_IFTYPE_UNSPECIFIED: + val8 |= MSR_LINKTYPE_NONE; + break; + case NL80211_IFTYPE_ADHOC: + val8 |= MSR_LINKTYPE_ADHOC; + break; + case NL80211_IFTYPE_STATION: + val8 |= MSR_LINKTYPE_STATION; + break; + case NL80211_IFTYPE_AP: + val8 |= MSR_LINKTYPE_AP; + break; + default: + goto out; + } + + rtl8xxxu_write8(priv, REG_MSR, val8); +out: + return; +} + +static void +rtl8xxxu_set_retry(struct rtl8xxxu_priv *priv, u16 short_retry, u16 long_retry) +{ + u16 val16; + + val16 = ((short_retry << RETRY_LIMIT_SHORT_SHIFT) & + RETRY_LIMIT_SHORT_MASK) | + ((long_retry << RETRY_LIMIT_LONG_SHIFT) & + RETRY_LIMIT_LONG_MASK); + + rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16); +} + +static void +rtl8xxxu_set_spec_sifs(struct rtl8xxxu_priv *priv, u16 cck, u16 ofdm) +{ + u16 val16; + + val16 = ((cck << SPEC_SIFS_CCK_SHIFT) & SPEC_SIFS_CCK_MASK) | + ((ofdm << SPEC_SIFS_OFDM_SHIFT) & SPEC_SIFS_OFDM_MASK); + + rtl8xxxu_write16(priv, REG_SPEC_SIFS, val16); +} + +static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv) +{ + struct device *dev = &priv->udev->dev; + char *cut; + + switch (priv->chip_cut) { + case 0: + cut = "A"; + break; + case 1: + cut = "B"; + break; + default: + cut = "unknown"; + } + + dev_info(dev, + "RTL%s rev %s (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n", + priv->chip_name, cut, priv->vendor_umc ? 
"UMC" : "TSMC", + priv->tx_paths, priv->rx_paths, priv->ep_tx_count, + priv->has_wifi, priv->has_bluetooth, priv->has_gps, + priv->hi_pa); + + dev_info(dev, "RTL%s MAC: %pM\n", priv->chip_name, priv->mac_addr); +} + +static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) +{ + struct device *dev = &priv->udev->dev; + u32 val32, bonding; + u16 val16; + + val32 = rtl8xxxu_read32(priv, REG_SYS_CFG); + priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >> + SYS_CFG_CHIP_VERSION_SHIFT; + if (val32 & SYS_CFG_TRP_VAUX_EN) { + dev_info(dev, "Unsupported test chip\n"); + return -ENOTSUPP; + } + + if (val32 & SYS_CFG_BT_FUNC) { + sprintf(priv->chip_name, "8723AU"); + priv->rf_paths = 1; + priv->rx_paths = 1; + priv->tx_paths = 1; + priv->rtlchip = 0x8723a; + + val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL); + if (val32 & MULTI_WIFI_FUNC_EN) + priv->has_wifi = 1; + if (val32 & MULTI_BT_FUNC_EN) + priv->has_bluetooth = 1; + if (val32 & MULTI_GPS_FUNC_EN) + priv->has_gps = 1; + } else if (val32 & SYS_CFG_TYPE_ID) { + bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); + bonding &= HPON_FSM_BONDING_MASK; + if (bonding == HPON_FSM_BONDING_1T2R) { + sprintf(priv->chip_name, "8191CU"); + priv->rf_paths = 2; + priv->rx_paths = 2; + priv->tx_paths = 1; + priv->rtlchip = 0x8191c; + } else { + sprintf(priv->chip_name, "8192CU"); + priv->rf_paths = 2; + priv->rx_paths = 2; + priv->tx_paths = 2; + priv->rtlchip = 0x8192c; + } + priv->has_wifi = 1; + } else { + sprintf(priv->chip_name, "8188CU"); + priv->rf_paths = 1; + priv->rx_paths = 1; + priv->tx_paths = 1; + priv->rtlchip = 0x8188c; + priv->has_wifi = 1; + } + + if (val32 & SYS_CFG_VENDOR_ID) + priv->vendor_umc = 1; + + val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS); + priv->rom_rev = (val32 & GPIO_RF_RL_ID) >> 28; + + val16 = rtl8xxxu_read16(priv, REG_NORMAL_SIE_EP_TX); + if (val16 & NORMAL_SIE_EP_TX_HIGH_MASK) { + priv->ep_tx_high_queue = 1; + priv->ep_tx_count++; + } + + if (val16 & NORMAL_SIE_EP_TX_NORMAL_MASK) { + priv->ep_tx_normal_queue = 1; + priv->ep_tx_count++; + } + + if (val16 & NORMAL_SIE_EP_TX_LOW_MASK) { + priv->ep_tx_low_queue = 1; + priv->ep_tx_count++; + } + + /* + * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX + */ + if (!priv->ep_tx_count) { + switch (priv->nr_out_eps) { + case 3: + priv->ep_tx_low_queue = 1; + priv->ep_tx_count++; + case 2: + priv->ep_tx_normal_queue = 1; + priv->ep_tx_count++; + case 1: + priv->ep_tx_high_queue = 1; + priv->ep_tx_count++; + break; + default: + dev_info(dev, "Unsupported USB TX end-points\n"); + return -ENOTSUPP; + } + } + + return 0; +} + +static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv) +{ + if (priv->efuse_wifi.efuse8723.rtl_id != cpu_to_le16(0x8129)) + return -EINVAL; + + ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8723.mac_addr); + + memcpy(priv->cck_tx_power_index_A, + priv->efuse_wifi.efuse8723.cck_tx_power_index_A, + sizeof(priv->cck_tx_power_index_A)); + memcpy(priv->cck_tx_power_index_B, + priv->efuse_wifi.efuse8723.cck_tx_power_index_B, + sizeof(priv->cck_tx_power_index_B)); + + memcpy(priv->ht40_1s_tx_power_index_A, + priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_A, + sizeof(priv->ht40_1s_tx_power_index_A)); + memcpy(priv->ht40_1s_tx_power_index_B, + priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_B, + sizeof(priv->ht40_1s_tx_power_index_B)); + + memcpy(priv->ht20_tx_power_index_diff, + priv->efuse_wifi.efuse8723.ht20_tx_power_index_diff, + sizeof(priv->ht20_tx_power_index_diff)); + memcpy(priv->ofdm_tx_power_index_diff, + 
priv->efuse_wifi.efuse8723.ofdm_tx_power_index_diff, + sizeof(priv->ofdm_tx_power_index_diff)); + + memcpy(priv->ht40_max_power_offset, + priv->efuse_wifi.efuse8723.ht40_max_power_offset, + sizeof(priv->ht40_max_power_offset)); + memcpy(priv->ht20_max_power_offset, + priv->efuse_wifi.efuse8723.ht20_max_power_offset, + sizeof(priv->ht20_max_power_offset)); + + dev_info(&priv->udev->dev, "Vendor: %.7s\n", + priv->efuse_wifi.efuse8723.vendor_name); + dev_info(&priv->udev->dev, "Product: %.41s\n", + priv->efuse_wifi.efuse8723.device_name); + return 0; +} + +#ifdef CONFIG_RTL8XXXU_UNTESTED + +static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv) +{ + int i; + + if (priv->efuse_wifi.efuse8192.rtl_id != cpu_to_le16(0x8129)) + return -EINVAL; + + ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8192.mac_addr); + + memcpy(priv->cck_tx_power_index_A, + priv->efuse_wifi.efuse8192.cck_tx_power_index_A, + sizeof(priv->cck_tx_power_index_A)); + memcpy(priv->cck_tx_power_index_B, + priv->efuse_wifi.efuse8192.cck_tx_power_index_B, + sizeof(priv->cck_tx_power_index_B)); + + memcpy(priv->ht40_1s_tx_power_index_A, + priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_A, + sizeof(priv->ht40_1s_tx_power_index_A)); + memcpy(priv->ht40_1s_tx_power_index_B, + priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_B, + sizeof(priv->ht40_1s_tx_power_index_B)); + memcpy(priv->ht40_2s_tx_power_index_diff, + priv->efuse_wifi.efuse8192.ht40_2s_tx_power_index_diff, + sizeof(priv->ht40_2s_tx_power_index_diff)); + + memcpy(priv->ht20_tx_power_index_diff, + priv->efuse_wifi.efuse8192.ht20_tx_power_index_diff, + sizeof(priv->ht20_tx_power_index_diff)); + memcpy(priv->ofdm_tx_power_index_diff, + priv->efuse_wifi.efuse8192.ofdm_tx_power_index_diff, + sizeof(priv->ofdm_tx_power_index_diff)); + + memcpy(priv->ht40_max_power_offset, + priv->efuse_wifi.efuse8192.ht40_max_power_offset, + sizeof(priv->ht40_max_power_offset)); + memcpy(priv->ht20_max_power_offset, + priv->efuse_wifi.efuse8192.ht20_max_power_offset, + sizeof(priv->ht20_max_power_offset)); + + dev_info(&priv->udev->dev, "Vendor: %.7s\n", + priv->efuse_wifi.efuse8192.vendor_name); + dev_info(&priv->udev->dev, "Product: %.20s\n", + priv->efuse_wifi.efuse8192.device_name); + + if (priv->efuse_wifi.efuse8192.rf_regulatory & 0x20) { + sprintf(priv->chip_name, "8188RU"); + priv->hi_pa = 1; + } + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) { + unsigned char *raw = priv->efuse_wifi.raw; + + dev_info(&priv->udev->dev, + "%s: dumping efuse (0x%02zx bytes):\n", + __func__, sizeof(struct rtl8192cu_efuse)); + for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) { + dev_info(&priv->udev->dev, "%02x: " + "%02x %02x %02x %02x %02x %02x %02x %02x\n", i, + raw[i], raw[i + 1], raw[i + 2], + raw[i + 3], raw[i + 4], raw[i + 5], + raw[i + 6], raw[i + 7]); + } + } + return 0; +} + +#endif + +static int +rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data) +{ + int i; + u8 val8; + u32 val32; + + /* Write Address */ + rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 1, offset & 0xff); + val8 = rtl8xxxu_read8(priv, REG_EFUSE_CTRL + 2); + val8 &= 0xfc; + val8 |= (offset >> 8) & 0x03; + rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 2, val8); + + val8 = rtl8xxxu_read8(priv, REG_EFUSE_CTRL + 3); + rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 3, val8 & 0x7f); + + /* Poll for data read */ + val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL); + for (i = 0; i < RTL8XXXU_MAX_REG_POLL; i++) { + val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL); + if (val32 & BIT(31)) + break; + } + + if (i == 
RTL8XXXU_MAX_REG_POLL) + return -EIO; + + udelay(50); + val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL); + + *data = val32 & 0xff; + return 0; +} + +static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) +{ + struct device *dev = &priv->udev->dev; + int i, ret = 0; + u8 val8, word_mask, header, extheader; + u16 val16, efuse_addr, offset; + u32 val32; + + val16 = rtl8xxxu_read16(priv, REG_9346CR); + if (val16 & EEPROM_ENABLE) + priv->has_eeprom = 1; + if (val16 & EEPROM_BOOT) + priv->boot_eeprom = 1; + + val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST); + val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT; + rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32); + + dev_dbg(dev, "Booting from %s\n", + priv->boot_eeprom ? "EEPROM" : "EFUSE"); + + rtl8xxxu_write8(priv, REG_EFUSE_ACCESS, EFUSE_ACCESS_ENABLE); + + /* 1.2V Power: From VDDON with Power Cut(0x0000[15]), default valid */ + val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL); + if (!(val16 & SYS_ISO_PWC_EV12V)) { + val16 |= SYS_ISO_PWC_EV12V; + rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16); + } + /* Reset: 0x0000[28], default valid */ + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + if (!(val16 & SYS_FUNC_ELDR)) { + val16 |= SYS_FUNC_ELDR; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + } + + /* + * Clock: Gated(0x0008[5]) 8M(0x0008[1]) clock from ANA, default valid + */ + val16 = rtl8xxxu_read16(priv, REG_SYS_CLKR); + if (!(val16 & SYS_CLK_LOADER_ENABLE) || !(val16 & SYS_CLK_ANA8M)) { + val16 |= (SYS_CLK_LOADER_ENABLE | SYS_CLK_ANA8M); + rtl8xxxu_write16(priv, REG_SYS_CLKR, val16); + } + + /* Default value is 0xff */ + memset(priv->efuse_wifi.raw, 0xff, EFUSE_MAP_LEN_8723A); + + efuse_addr = 0; + while (efuse_addr < EFUSE_REAL_CONTENT_LEN_8723A) { + ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &header); + if (ret || header == 0xff) + goto exit; + + if ((header & 0x1f) == 0x0f) { /* extended header */ + offset = (header & 0xe0) >> 5; + + ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, + &extheader); + if (ret) + goto exit; + /* All words disabled */ + if ((extheader & 0x0f) == 0x0f) + continue; + + offset |= ((extheader & 0xf0) >> 1); + word_mask = extheader & 0x0f; + } else { + offset = (header >> 4) & 0x0f; + word_mask = header & 0x0f; + } + + if (offset < EFUSE_MAX_SECTION_8723A) { + u16 map_addr; + /* Get word enable value from PG header */ + + /* We have 8 bits to indicate validity */ + map_addr = offset * 8; + if (map_addr >= EFUSE_MAP_LEN_8723A) { + dev_warn(dev, "%s: Illegal map_addr (%04x), " + "efuse corrupt!\n", + __func__, map_addr); + ret = -EINVAL; + goto exit; + } + for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { + /* Check word enable condition in the section */ + if (!(word_mask & BIT(i))) { + ret = rtl8xxxu_read_efuse8(priv, + efuse_addr++, + &val8); + if (ret) + goto exit; + priv->efuse_wifi.raw[map_addr++] = val8; + + ret = rtl8xxxu_read_efuse8(priv, + efuse_addr++, + &val8); + if (ret) + goto exit; + priv->efuse_wifi.raw[map_addr++] = val8; + } else + map_addr += 2; + } + } else { + dev_warn(dev, + "%s: Illegal offset (%04x), efuse corrupt!\n", + __func__, offset); + ret = -EINVAL; + goto exit; + } + } + +exit: + rtl8xxxu_write8(priv, REG_EFUSE_ACCESS, EFUSE_ACCESS_DISABLE); + + return ret; +} + +static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv) +{ + struct device *dev = &priv->udev->dev; + int ret = 0, i; + u32 val32; + + /* Poll checksum report */ + for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) { + val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL); + if (val32 & MCU_FW_DL_CSUM_REPORT) + break; + } + + if 
(i == RTL8XXXU_FIRMWARE_POLL_MAX) { + dev_warn(dev, "Firmware checksum poll timed out\n"); + ret = -EAGAIN; + goto exit; + } + + val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL); + val32 |= MCU_FW_DL_READY; + val32 &= ~MCU_WINT_INIT_READY; + rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32); + + /* Wait for firmware to become ready */ + for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) { + val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL); + if (val32 & MCU_WINT_INIT_READY) + break; + + udelay(100); + } + + if (i == RTL8XXXU_FIRMWARE_POLL_MAX) { + dev_warn(dev, "Firmware failed to start\n"); + ret = -EAGAIN; + goto exit; + } + +exit: + return ret; +} + +static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) +{ + int pages, remainder, i, ret; + u8 val8; + u16 val16; + u32 val32; + u8 *fwptr; + + val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC + 1); + val8 |= 4; + rtl8xxxu_write8(priv, REG_SYS_FUNC + 1, val8); + + /* 8051 enable */ + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16 | SYS_FUNC_CPU_ENABLE); + + /* MCU firmware download enable */ + val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL); + rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_ENABLE); + + /* 8051 reset */ + val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL); + rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32 & ~BIT(19)); + + /* Reset firmware download checksum */ + val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL); + rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_CSUM_REPORT); + + pages = priv->fw_size / RTL_FW_PAGE_SIZE; + remainder = priv->fw_size % RTL_FW_PAGE_SIZE; + + fwptr = priv->fw_data->data; + + for (i = 0; i < pages; i++) { + val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8; + rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i); + + ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS, + fwptr, RTL_FW_PAGE_SIZE); + if (ret != RTL_FW_PAGE_SIZE) { + ret = -EAGAIN; + goto fw_abort; + } + + fwptr += RTL_FW_PAGE_SIZE; + } + + if (remainder) { + val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8; + rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i); + ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS, + fwptr, remainder); + if (ret != remainder) { + ret = -EAGAIN; + goto fw_abort; + } + } + + ret = 0; +fw_abort: + /* MCU firmware download disable */ + val16 = rtl8xxxu_read16(priv, REG_MCU_FW_DL); + rtl8xxxu_write16(priv, REG_MCU_FW_DL, + val16 & (~MCU_FW_DL_ENABLE & 0xff)); + + return ret; +} + +static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name) +{ + struct device *dev = &priv->udev->dev; + const struct firmware *fw; + int ret = 0; + u16 signature; + + dev_info(dev, "%s: Loading firmware %s\n", DRIVER_NAME, fw_name); + if (request_firmware(&fw, fw_name, &priv->udev->dev)) { + dev_warn(dev, "request_firmware(%s) failed\n", fw_name); + ret = -EAGAIN; + goto exit; + } + if (!fw) { + dev_warn(dev, "Firmware data not available\n"); + ret = -EINVAL; + goto exit; + } + + priv->fw_data = kmemdup(fw->data, fw->size, GFP_KERNEL); + priv->fw_size = fw->size - sizeof(struct rtl8xxxu_firmware_header); + + signature = le16_to_cpu(priv->fw_data->signature); + switch (signature & 0xfff0) { + case 0x92c0: + case 0x88c0: + case 0x2300: + break; + default: + ret = -EINVAL; + dev_warn(dev, "%s: Invalid firmware signature: 0x%04x\n", + __func__, signature); + } + + dev_info(dev, "Firmware revision %i.%i (signature 0x%04x)\n", + le16_to_cpu(priv->fw_data->major_version), + priv->fw_data->minor_version, signature); + +exit: + release_firmware(fw); + return ret; +} + +static int 
rtl8723au_load_firmware(struct rtl8xxxu_priv *priv) +{ + char *fw_name; + int ret; + + switch (priv->chip_cut) { + case 0: + fw_name = "rtlwifi/rtl8723aufw_A.bin"; + break; + case 1: + if (priv->enable_bluetooth) + fw_name = "rtlwifi/rtl8723aufw_B.bin"; + else + fw_name = "rtlwifi/rtl8723aufw_B_NoBT.bin"; + + break; + default: + return -EINVAL; + } + + ret = rtl8xxxu_load_firmware(priv, fw_name); + return ret; +} + +#ifdef CONFIG_RTL8XXXU_UNTESTED + +static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv) +{ + char *fw_name; + int ret; + + if (!priv->vendor_umc) + fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; + else if (priv->chip_cut || priv->rtlchip == 0x8192c) + fw_name = "rtlwifi/rtl8192cufw_B.bin"; + else + fw_name = "rtlwifi/rtl8192cufw_A.bin"; + + ret = rtl8xxxu_load_firmware(priv, fw_name); + + return ret; +} + +#endif + +static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv) +{ + u16 val16; + int i = 100; + + /* Inform 8051 to perform reset */ + rtl8xxxu_write8(priv, REG_HMTFR + 3, 0x20); + + for (i = 100; i > 0; i--) { + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + + if (!(val16 & SYS_FUNC_CPU_ENABLE)) { + dev_dbg(&priv->udev->dev, + "%s: Firmware self reset success!\n", __func__); + break; + } + udelay(50); + } + + if (!i) { + /* Force firmware reset */ + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 &= ~SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + } +} + +static int +rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array) +{ + int i, ret; + u16 reg; + u8 val; + + for (i = 0; ; i++) { + reg = array[i].reg; + val = array[i].val; + + if (reg == 0xffff && val == 0xff) + break; + + ret = rtl8xxxu_write8(priv, reg, val); + if (ret != 1) { + dev_warn(&priv->udev->dev, + "Failed to initialize MAC\n"); + return -EAGAIN; + } + } + + rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a); + + return 0; +} + +static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv, + struct rtl8xxxu_reg32val *array) +{ + int i, ret; + u16 reg; + u32 val; + + for (i = 0; ; i++) { + reg = array[i].reg; + val = array[i].val; + + if (reg == 0xffff && val == 0xffffffff) + break; + + ret = rtl8xxxu_write32(priv, reg, val); + if (ret != sizeof(val)) { + dev_warn(&priv->udev->dev, + "Failed to initialize PHY\n"); + return -EAGAIN; + } + udelay(1); + } + + return 0; +} + +/* + * Most of this is black magic retrieved from the old rtl8723au driver + */ +static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) +{ + u8 val8, ldoa15, ldov12d, lpldo, ldohci12; + u32 val32; + + /* + * Todo: The vendor driver maintains a table of PHY register + * addresses, which is initialized here. Do we need this? + */ + + val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL); + udelay(2); + val8 |= AFE_PLL_320_ENABLE; + rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8); + udelay(2); + + rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff); + udelay(2); + + val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC); + val8 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB; + rtl8xxxu_write8(priv, REG_SYS_FUNC, val8); + + /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */ + val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL); + val32 &= ~AFE_XTAL_RF_GATE; + if (priv->has_bluetooth) + val32 &= ~AFE_XTAL_BT_GATE; + rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32); + + /* 6. 
0x1f[7:0] = 0x07 */ + val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; + rtl8xxxu_write8(priv, REG_RF_CTRL, val8); + + if (priv->hi_pa) + rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table); + else if (priv->tx_paths == 2) + rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table); + else + rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table); + + + if (priv->rtlchip == 0x8188c && priv->hi_pa && + priv->vendor_umc && priv->chip_cut == 1) + rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50); + + if (priv->tx_paths == 1 && priv->rx_paths == 2) { + /* + * For 1T2R boards, patch the registers. + * + * It looks like 8191/2 1T2R boards use path B for TX + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_TX_INFO); + val32 &= ~(BIT(0) | BIT(1)); + val32 |= BIT(1); + rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA1_TX_INFO); + val32 &= ~0x300033; + val32 |= 0x200022; + rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32); + + val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING); + val32 &= 0xff000000; + val32 |= 0x45000000; + rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); + val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK); + val32 |= (OFDM_RF_PATH_RX_A | OFDM_RF_PATH_RX_B | + OFDM_RF_PATH_TX_B); + rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_AGC_PARM1); + val32 &= ~(BIT(4) | BIT(5)); + val32 |= BIT(4); + rtl8xxxu_write32(priv, REG_OFDM0_AGC_PARM1, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_CCK_RFON); + val32 &= ~(BIT(27) | BIT(26)); + val32 |= BIT(27); + rtl8xxxu_write32(priv, REG_TX_CCK_RFON, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_CCK_BBON); + val32 &= ~(BIT(27) | BIT(26)); + val32 |= BIT(27); + rtl8xxxu_write32(priv, REG_TX_CCK_BBON, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_OFDM_RFON); + val32 &= ~(BIT(27) | BIT(26)); + val32 |= BIT(27); + rtl8xxxu_write32(priv, REG_TX_OFDM_RFON, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_OFDM_BBON); + val32 &= ~(BIT(27) | BIT(26)); + val32 |= BIT(27); + rtl8xxxu_write32(priv, REG_TX_OFDM_BBON, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_TO_TX); + val32 &= ~(BIT(27) | BIT(26)); + val32 |= BIT(27); + rtl8xxxu_write32(priv, REG_TX_TO_TX, val32); + } + + if (priv->hi_pa) + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table); + else + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table); + + if (priv->rtlchip == 0x8723a && + priv->efuse_wifi.efuse8723.version >= 0x01) { + val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL); + + val8 = priv->efuse_wifi.efuse8723.xtal_k & 0x3f; + val32 &= 0xff000fff; + val32 |= ((val8 | (val8 << 6)) << 12); + + rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32); + } + + ldoa15 = LDOA15_ENABLE | LDOA15_OBUF; + ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT); + ldohci12 = 0x57; + lpldo = 1; + val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15; + + rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32); + + return 0; +} + +static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv, + struct rtl8xxxu_rfregval *array, + enum rtl8xxxu_rfpath path) +{ + int i, ret; + u8 reg; + u32 val; + + for (i = 0; ; i++) { + reg = array[i].reg; + val = array[i].val; + + if (reg == 0xff && val == 0xffffffff) + break; + + switch (reg) { + case 0xfe: + msleep(50); + continue; + case 0xfd: + mdelay(5); + continue; + case 0xfc: + mdelay(1); + continue; + case 0xfb: + udelay(50); + continue; + case 0xfa: + udelay(5); + continue; + 
case 0xf9: + udelay(1); + continue; + } + + reg &= 0x3f; + + ret = rtl8xxxu_write_rfreg(priv, path, reg, val); + if (ret) { + dev_warn(&priv->udev->dev, + "Failed to initialize RF\n"); + return -EAGAIN; + } + udelay(1); + } + + return 0; +} + +static int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv, + struct rtl8xxxu_rfregval *table, + enum rtl8xxxu_rfpath path) +{ + u32 val32; + u16 val16, rfsi_rfenv; + u16 reg_sw_ctrl, reg_int_oe, reg_hssi_parm2; + + switch (path) { + case RF_A: + reg_sw_ctrl = REG_FPGA0_XA_RF_SW_CTRL; + reg_int_oe = REG_FPGA0_XA_RF_INT_OE; + reg_hssi_parm2 = REG_FPGA0_XA_HSSI_PARM2; + break; + case RF_B: + reg_sw_ctrl = REG_FPGA0_XB_RF_SW_CTRL; + reg_int_oe = REG_FPGA0_XB_RF_INT_OE; + reg_hssi_parm2 = REG_FPGA0_XB_HSSI_PARM2; + break; + default: + dev_err(&priv->udev->dev, "%s:Unsupported RF path %c\n", + __func__, path + 'A'); + return -EINVAL; + } + /* For path B, use XB */ + rfsi_rfenv = rtl8xxxu_read16(priv, reg_sw_ctrl); + rfsi_rfenv &= FPGA0_RF_RFENV; + + /* + * These two we might be able to optimize into one + */ + val32 = rtl8xxxu_read32(priv, reg_int_oe); + val32 |= BIT(20); /* 0x10 << 16 */ + rtl8xxxu_write32(priv, reg_int_oe, val32); + udelay(1); + + val32 = rtl8xxxu_read32(priv, reg_int_oe); + val32 |= BIT(4); + rtl8xxxu_write32(priv, reg_int_oe, val32); + udelay(1); + + /* + * These two we might be able to optimize into one + */ + val32 = rtl8xxxu_read32(priv, reg_hssi_parm2); + val32 &= ~FPGA0_HSSI_3WIRE_ADDR_LEN; + rtl8xxxu_write32(priv, reg_hssi_parm2, val32); + udelay(1); + + val32 = rtl8xxxu_read32(priv, reg_hssi_parm2); + val32 &= ~FPGA0_HSSI_3WIRE_DATA_LEN; + rtl8xxxu_write32(priv, reg_hssi_parm2, val32); + udelay(1); + + rtl8xxxu_init_rf_regs(priv, table, path); + + /* For path B, use XB */ + val16 = rtl8xxxu_read16(priv, reg_sw_ctrl); + val16 &= ~FPGA0_RF_RFENV; + val16 |= rfsi_rfenv; + rtl8xxxu_write16(priv, reg_sw_ctrl, val16); + + return 0; +} + +static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data) +{ + int ret = -EBUSY; + int count = 0; + u32 value; + + value = LLT_OP_WRITE | address << 8 | data; + + rtl8xxxu_write32(priv, REG_LLT_INIT, value); + + do { + value = rtl8xxxu_read32(priv, REG_LLT_INIT); + if ((value & LLT_OP_MASK) == LLT_OP_INACTIVE) { + ret = 0; + break; + } + } while (count++ < 20); + + return ret; +} + +static int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page) +{ + int ret; + int i; + + for (i = 0; i < last_tx_page; i++) { + ret = rtl8xxxu_llt_write(priv, i, i + 1); + if (ret) + goto exit; + } + + ret = rtl8xxxu_llt_write(priv, last_tx_page, 0xff); + if (ret) + goto exit; + + /* Mark remaining pages as a ring buffer */ + for (i = last_tx_page + 1; i < 0xff; i++) { + ret = rtl8xxxu_llt_write(priv, i, (i + 1)); + if (ret) + goto exit; + } + + /* Let last entry point to the start entry of ring buffer */ + ret = rtl8xxxu_llt_write(priv, 0xff, last_tx_page + 1); + if (ret) + goto exit; + +exit: + return ret; +} + +static int rtl8xxxu_init_queue_priority(struct rtl8xxxu_priv *priv) +{ + u16 val16, hi, lo; + u16 hiq, mgq, bkq, beq, viq, voq; + int hip, mgp, bkp, bep, vip, vop; + int ret = 0; + + switch (priv->ep_tx_count) { + case 1: + if (priv->ep_tx_high_queue) { + hi = TRXDMA_QUEUE_HIGH; + } else if (priv->ep_tx_low_queue) { + hi = TRXDMA_QUEUE_LOW; + } else if (priv->ep_tx_normal_queue) { + hi = TRXDMA_QUEUE_NORMAL; + } else { + hi = 0; + ret = -EINVAL; + } + + hiq = hi; + mgq = hi; + bkq = hi; + beq = hi; + viq = hi; + voq = hi; + + hip = 0; + mgp = 0; + bkp = 0; + bep = 0; + vip 
= 0; + vop = 0; + break; + case 2: + if (priv->ep_tx_high_queue && priv->ep_tx_low_queue) { + hi = TRXDMA_QUEUE_HIGH; + lo = TRXDMA_QUEUE_LOW; + } else if (priv->ep_tx_normal_queue && priv->ep_tx_low_queue) { + hi = TRXDMA_QUEUE_NORMAL; + lo = TRXDMA_QUEUE_LOW; + } else if (priv->ep_tx_high_queue && priv->ep_tx_normal_queue) { + hi = TRXDMA_QUEUE_HIGH; + lo = TRXDMA_QUEUE_NORMAL; + } else { + ret = -EINVAL; + hi = 0; + lo = 0; + } + + hiq = hi; + mgq = hi; + bkq = lo; + beq = lo; + viq = hi; + voq = hi; + + hip = 0; + mgp = 0; + bkp = 1; + bep = 1; + vip = 0; + vop = 0; + break; + case 3: + beq = TRXDMA_QUEUE_LOW; + bkq = TRXDMA_QUEUE_LOW; + viq = TRXDMA_QUEUE_NORMAL; + voq = TRXDMA_QUEUE_HIGH; + mgq = TRXDMA_QUEUE_HIGH; + hiq = TRXDMA_QUEUE_HIGH; + + hip = hiq ^ 3; + mgp = mgq ^ 3; + bkp = bkq ^ 3; + bep = beq ^ 3; + vip = viq ^ 3; + vop = viq ^ 3; + break; + default: + ret = -EINVAL; + } + + /* + * None of the vendor drivers are configuring the beacon + * queue here .... why? + */ + if (!ret) { + val16 = rtl8xxxu_read16(priv, REG_TRXDMA_CTRL); + val16 &= 0x7; + val16 |= (voq << TRXDMA_CTRL_VOQ_SHIFT) | + (viq << TRXDMA_CTRL_VIQ_SHIFT) | + (beq << TRXDMA_CTRL_BEQ_SHIFT) | + (bkq << TRXDMA_CTRL_BKQ_SHIFT) | + (mgq << TRXDMA_CTRL_MGQ_SHIFT) | + (hiq << TRXDMA_CTRL_HIQ_SHIFT); + rtl8xxxu_write16(priv, REG_TRXDMA_CTRL, val16); + + priv->pipe_out[TXDESC_QUEUE_VO] = + usb_sndbulkpipe(priv->udev, priv->out_ep[vop]); + priv->pipe_out[TXDESC_QUEUE_VI] = + usb_sndbulkpipe(priv->udev, priv->out_ep[vip]); + priv->pipe_out[TXDESC_QUEUE_BE] = + usb_sndbulkpipe(priv->udev, priv->out_ep[bep]); + priv->pipe_out[TXDESC_QUEUE_BK] = + usb_sndbulkpipe(priv->udev, priv->out_ep[bkp]); + priv->pipe_out[TXDESC_QUEUE_BEACON] = + usb_sndbulkpipe(priv->udev, priv->out_ep[0]); + priv->pipe_out[TXDESC_QUEUE_MGNT] = + usb_sndbulkpipe(priv->udev, priv->out_ep[mgp]); + priv->pipe_out[TXDESC_QUEUE_HIGH] = + usb_sndbulkpipe(priv->udev, priv->out_ep[hip]); + priv->pipe_out[TXDESC_QUEUE_CMD] = + usb_sndbulkpipe(priv->udev, priv->out_ep[0]); + } + + return ret; +} + +static void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv, + bool iqk_ok, int result[][8], + int candidate, bool tx_only) +{ + u32 oldval, x, tx0_a, reg; + int y, tx0_c; + u32 val32; + + if (!iqk_ok) + return; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE); + oldval = val32 >> 22; + + x = result[candidate][0]; + if ((x & 0x00000200) != 0) + x = x | 0xfffffc00; + tx0_a = (x * oldval) >> 8; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE); + val32 &= ~0x3ff; + val32 |= tx0_a; + rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES); + val32 &= ~BIT(31); + if ((x * oldval >> 7) & 0x1) + val32 |= BIT(31); + rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32); + + y = result[candidate][1]; + if ((y & 0x00000200) != 0) + y = y | 0xfffffc00; + tx0_c = (y * oldval) >> 8; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XC_TX_AFE); + val32 &= ~0xf0000000; + val32 |= (((tx0_c & 0x3c0) >> 6) << 28); + rtl8xxxu_write32(priv, REG_OFDM0_XC_TX_AFE, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE); + val32 &= ~0x003f0000; + val32 |= ((tx0_c & 0x3f) << 16); + rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES); + val32 &= ~BIT(29); + if ((y * oldval >> 7) & 0x1) + val32 |= BIT(29); + rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32); + + if (tx_only) { + 
dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__); + return; + } + + reg = result[candidate][2]; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE); + val32 &= ~0x3ff; + val32 |= (reg & 0x3ff); + rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32); + + reg = result[candidate][3] & 0x3F; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE); + val32 &= ~0xfc00; + val32 |= ((reg << 10) & 0xfc00); + rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32); + + reg = (result[candidate][3] >> 6) & 0xF; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_RX_IQ_EXT_ANTA); + val32 &= ~0xf0000000; + val32 |= (reg << 28); + rtl8xxxu_write32(priv, REG_OFDM0_RX_IQ_EXT_ANTA, val32); +} + +static void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv, + bool iqk_ok, int result[][8], + int candidate, bool tx_only) +{ + u32 oldval, x, tx1_a, reg; + int y, tx1_c; + u32 val32; + + if (!iqk_ok) + return; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE); + oldval = val32 >> 22; + + x = result[candidate][4]; + if ((x & 0x00000200) != 0) + x = x | 0xfffffc00; + tx1_a = (x * oldval) >> 8; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE); + val32 &= ~0x3ff; + val32 |= tx1_a; + rtl8xxxu_write32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES); + val32 &= ~BIT(27); + if ((x * oldval >> 7) & 0x1) + val32 |= BIT(27); + rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32); + + y = result[candidate][5]; + if ((y & 0x00000200) != 0) + y = y | 0xfffffc00; + tx1_c = (y * oldval) >> 8; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XD_TX_AFE); + val32 &= ~0xf0000000; + val32 |= (((tx1_c & 0x3c0) >> 6) << 28); + rtl8xxxu_write32(priv, REG_OFDM0_XD_TX_AFE, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE); + val32 &= ~0x003f0000; + val32 |= ((tx1_c & 0x3f) << 16); + rtl8xxxu_write32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES); + val32 &= ~BIT(25); + if ((y * oldval >> 7) & 0x1) + val32 |= BIT(25); + rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32); + + if (tx_only) { + dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__); + return; + } + + reg = result[candidate][6]; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE); + val32 &= ~0x3ff; + val32 |= (reg & 0x3ff); + rtl8xxxu_write32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE, val32); + + reg = result[candidate][7] & 0x3f; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE); + val32 &= ~0xfc00; + val32 |= ((reg << 10) & 0xfc00); + rtl8xxxu_write32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE, val32); + + reg = (result[candidate][7] >> 6) & 0xf; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_AGCR_SSI_TABLE); + val32 &= ~0x0000f000; + val32 |= (reg << 12); + rtl8xxxu_write32(priv, REG_OFDM0_AGCR_SSI_TABLE, val32); +} + +#define MAX_TOLERANCE 5 + +static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv, + int result[][8], int c1, int c2) +{ + u32 i, j, diff, simubitmap, bound = 0; + int candidate[2] = {-1, -1}; /* for path A and path B */ + bool retval = true; + + if (priv->tx_paths > 1) + bound = 8; + else + bound = 4; + + simubitmap = 0; + + for (i = 0; i < bound; i++) { + diff = (result[c1][i] > result[c2][i]) ? 
+ (result[c1][i] - result[c2][i]) : + (result[c2][i] - result[c1][i]); + if (diff > MAX_TOLERANCE) { + if ((i == 2 || i == 6) && !simubitmap) { + if (result[c1][i] + result[c1][i + 1] == 0) + candidate[(i / 4)] = c2; + else if (result[c2][i] + result[c2][i + 1] == 0) + candidate[(i / 4)] = c1; + else + simubitmap = simubitmap | (1 << i); + } else { + simubitmap = simubitmap | (1 << i); + } + } + } + + if (simubitmap == 0) { + for (i = 0; i < (bound / 4); i++) { + if (candidate[i] >= 0) { + for (j = i * 4; j < (i + 1) * 4 - 2; j++) + result[3][j] = result[candidate[i]][j]; + retval = false; + } + } + return retval; + } else if (!(simubitmap & 0x0f)) { + /* path A OK */ + for (i = 0; i < 4; i++) + result[3][i] = result[c1][i]; + } else if (!(simubitmap & 0xf0) && priv->tx_paths > 1) { + /* path B OK */ + for (i = 4; i < 8; i++) + result[3][i] = result[c1][i]; + } + + return false; +} + +static void +rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup) +{ + int i; + + for (i = 0; i < (RTL8XXXU_MAC_REGS - 1); i++) + backup[i] = rtl8xxxu_read8(priv, reg[i]); + + backup[i] = rtl8xxxu_read32(priv, reg[i]); +} + +static void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv, + const u32 *reg, u32 *backup) +{ + int i; + + for (i = 0; i < (RTL8XXXU_MAC_REGS - 1); i++) + rtl8xxxu_write8(priv, reg[i], backup[i]); + + rtl8xxxu_write32(priv, reg[i], backup[i]); +} + +static void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs, + u32 *backup, int count) +{ + int i; + + for (i = 0; i < count; i++) + backup[i] = rtl8xxxu_read32(priv, regs[i]); +} + +static void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs, + u32 *backup, int count) +{ + int i; + + for (i = 0; i < count; i++) + rtl8xxxu_write32(priv, regs[i], backup[i]); +} + + +static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs, + bool path_a_on) +{ + u32 path_on; + int i; + + path_on = path_a_on ? 0x04db25a4 : 0x0b1b25a4; + if (priv->tx_paths == 1) { + path_on = 0x0bdb25a0; + rtl8xxxu_write32(priv, regs[0], 0x0b1b25a0); + } else { + rtl8xxxu_write32(priv, regs[0], path_on); + } + + for (i = 1 ; i < RTL8XXXU_ADDA_REGS ; i++) + rtl8xxxu_write32(priv, regs[i], path_on); +} + +static void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv, + const u32 *regs, u32 *backup) +{ + int i = 0; + + rtl8xxxu_write8(priv, regs[i], 0x3f); + + for (i = 1 ; i < (RTL8XXXU_MAC_REGS - 1); i++) + rtl8xxxu_write8(priv, regs[i], (u8)(backup[i] & ~BIT(3))); + + rtl8xxxu_write8(priv, regs[i], (u8)(backup[i] & ~BIT(5))); +} + +static int rtl8xxxu_iqk_path_a(struct rtl8xxxu_priv *priv) +{ + u32 reg_eac, reg_e94, reg_e9c, reg_ea4, val32; + int result = 0; + + /* path-A IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1f); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x10008c1f); + rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82140102); + + val32 = (priv->rf_paths > 1) ? 
0x28160202 : + /*IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID)?0x28160202: */ + 0x28160502; + rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, val32); + + /* path-B IQK setting */ + if (priv->rf_paths > 1) { + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x10008c22); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x10008c22); + rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82140102); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28160202); + } + + /* LO calibration setting */ + rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x001028d1); + + /* One shot, path A LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); + + mdelay(1); + + /* Check failed */ + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); + reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); + reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); + + if (!(reg_eac & BIT(28)) && + ((reg_e94 & 0x03ff0000) != 0x01420000) && + ((reg_e9c & 0x03ff0000) != 0x00420000)) + result |= 0x01; + else /* If TX not OK, ignore RX */ + goto out; + + /* If TX is OK, check whether RX is OK */ + if (!(reg_eac & BIT(27)) && + ((reg_ea4 & 0x03ff0000) != 0x01320000) && + ((reg_eac & 0x03ff0000) != 0x00360000)) + result |= 0x02; + else + dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n", + __func__); +out: + return result; +} + +static int rtl8xxxu_iqk_path_b(struct rtl8xxxu_priv *priv) +{ + u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc; + int result = 0; + + /* One shot, path B LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002); + rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000); + + mdelay(1); + + /* Check failed */ + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); + reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); + reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2); + reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2); + + if (!(reg_eac & BIT(31)) && + ((reg_eb4 & 0x03ff0000) != 0x01420000) && + ((reg_ebc & 0x03ff0000) != 0x00420000)) + result |= 0x01; + else + goto out; + + if (!(reg_eac & BIT(30)) && + (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) && + (((reg_ecc & 0x03ff0000) >> 16) != 0x36)) + result |= 0x02; + else + dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n", + __func__); +out: + return result; +} + +static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, + int result[][8], int t) +{ + struct device *dev = &priv->udev->dev; + u32 i, val32; + int path_a_ok, path_b_ok; + int retry = 2; + const u32 adda_regs[RTL8XXXU_ADDA_REGS] = { + REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH, + REG_RX_WAIT_CCA, REG_TX_CCK_RFON, + REG_TX_CCK_BBON, REG_TX_OFDM_RFON, + REG_TX_OFDM_BBON, REG_TX_TO_RX, + REG_TX_TO_TX, REG_RX_CCK, + REG_RX_OFDM, REG_RX_WAIT_RIFS, + REG_RX_TO_RX, REG_STANDBY, + REG_SLEEP, REG_PMPD_ANAEN + }; + const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = { + REG_TXPAUSE, REG_BEACON_CTRL, + REG_BEACON_CTRL_1, REG_GPIO_MUXCFG + }; + const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = { + REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR, + REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B, + REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE, + REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE + }; + + /* + * Note: IQ calibration must be performed after loading + * PHY_REG.txt , and radio_a, radio_b.txt + */ + + if (t == 0) { + /* Save ADDA parameters, turn Path A ADDA on */ + 
rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup, + RTL8XXXU_ADDA_REGS); + rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup); + rtl8xxxu_save_regs(priv, iqk_bb_regs, + priv->bb_backup, RTL8XXXU_BB_REGS); + } + + rtl8xxxu_path_adda_on(priv, adda_regs, true); + + if (t == 0) { + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1); + if (val32 & FPGA0_HSSI_PARM1_PI) + priv->pi_enabled = 1; + } + + if (!priv->pi_enabled) { + /* Switch BB to PI mode to do IQ Calibration. */ + rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100); + rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000100); + } + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + val32 &= ~FPGA_RF_MODE_CCK; + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + + rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600); + rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4); + rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL); + val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT)); + rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE); + val32 &= ~BIT(10); + rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32); + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE); + val32 &= ~BIT(10); + rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32); + + if (priv->tx_paths > 1) { + rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000); + rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM, 0x00010000); + } + + /* MAC settings */ + rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup); + + /* Page B init */ + rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x00080000); + + if (priv->tx_paths > 1) + rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x00080000); + + /* IQ calibration setting */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); + rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); + + for (i = 0; i < retry; i++) { + path_a_ok = rtl8xxxu_iqk_path_a(priv); + if (path_a_ok == 0x03) { + val32 = rtl8xxxu_read32(priv, + REG_TX_POWER_BEFORE_IQK_A); + result[t][0] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, + REG_TX_POWER_AFTER_IQK_A); + result[t][1] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, + REG_RX_POWER_BEFORE_IQK_A_2); + result[t][2] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, + REG_RX_POWER_AFTER_IQK_A_2); + result[t][3] = (val32 >> 16) & 0x3ff; + break; + } else if (i == (retry - 1) && path_a_ok == 0x01) { + /* TX IQK OK */ + dev_dbg(dev, "%s: Path A IQK Only Tx Success!!\n", + __func__); + + val32 = rtl8xxxu_read32(priv, + REG_TX_POWER_BEFORE_IQK_A); + result[t][0] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, + REG_TX_POWER_AFTER_IQK_A); + result[t][1] = (val32 >> 16) & 0x3ff; + } + } + + if (!path_a_ok) + dev_dbg(dev, "%s: Path A IQK failed!\n", __func__); + + if (priv->tx_paths > 1) { + /* + * Path A into standby + */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x0); + rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000); + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000); + + /* Turn Path B ADDA on */ + rtl8xxxu_path_adda_on(priv, adda_regs, false); + + for (i = 0; i < retry; i++) { + path_b_ok = rtl8xxxu_iqk_path_b(priv); + if (path_b_ok == 0x03) { + val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); + result[t][4] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); + result[t][5] = (val32 >> 16) & 
0x3ff; + val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2); + result[t][6] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2); + result[t][7] = (val32 >> 16) & 0x3ff; + break; + } else if (i == (retry - 1) && path_b_ok == 0x01) { + /* TX IQK OK */ + val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); + result[t][4] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); + result[t][5] = (val32 >> 16) & 0x3ff; + } + } + + if (!path_b_ok) + dev_dbg(dev, "%s: Path B IQK failed!\n", __func__); + } + + /* Back to BB mode, load original value */ + rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0); + + if (t) { + if (!priv->pi_enabled) { + /* + * Switch back BB to SI mode after finishing + * IQ Calibration + */ + val32 = 0x01000000; + rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, val32); + rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, val32); + } + + /* Reload ADDA power saving parameters */ + rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup, + RTL8XXXU_ADDA_REGS); + + /* Reload MAC parameters */ + rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup); + + /* Reload BB parameters */ + rtl8xxxu_restore_regs(priv, iqk_bb_regs, + priv->bb_backup, RTL8XXXU_BB_REGS); + + /* Restore RX initial gain */ + rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00032ed3); + + if (priv->tx_paths > 1) { + rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM, + 0x00032ed3); + } + + /* Load 0xe30 IQC default value */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00); + } +} + +static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv) +{ + struct device *dev = &priv->udev->dev; + int result[4][8]; /* last is final result */ + int i, candidate; + bool path_a_ok, path_b_ok; + u32 reg_e94, reg_e9c, reg_ea4, reg_eac; + u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc; + s32 reg_tmp = 0; + bool simu; + + memset(result, 0, sizeof(result)); + candidate = -1; + + path_a_ok = false; + path_b_ok = false; + + rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + + for (i = 0; i < 3; i++) { + rtl8xxxu_phy_iqcalibrate(priv, result, i); + + if (i == 1) { + simu = rtl8xxxu_simularity_compare(priv, result, 0, 1); + if (simu) { + candidate = 0; + break; + } + } + + if (i == 2) { + simu = rtl8xxxu_simularity_compare(priv, result, 0, 2); + if (simu) { + candidate = 0; + break; + } + + simu = rtl8xxxu_simularity_compare(priv, result, 1, 2); + if (simu) { + candidate = 1; + } else { + for (i = 0; i < 8; i++) + reg_tmp += result[3][i]; + + if (reg_tmp) + candidate = 3; + else + candidate = -1; + } + } + } + + for (i = 0; i < 4; i++) { + reg_e94 = result[i][0]; + reg_e9c = result[i][1]; + reg_ea4 = result[i][2]; + reg_eac = result[i][3]; + reg_eb4 = result[i][4]; + reg_ebc = result[i][5]; + reg_ec4 = result[i][6]; + reg_ecc = result[i][7]; + } + + if (candidate >= 0) { + reg_e94 = result[candidate][0]; + priv->rege94 = reg_e94; + reg_e9c = result[candidate][1]; + priv->rege9c = reg_e9c; + reg_ea4 = result[candidate][2]; + reg_eac = result[candidate][3]; + reg_eb4 = result[candidate][4]; + priv->regeb4 = reg_eb4; + reg_ebc = result[candidate][5]; + priv->regebc = reg_ebc; + reg_ec4 = result[candidate][6]; + reg_ecc = result[candidate][7]; + dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); + dev_dbg(dev, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " + "ecc=%x\n ", __func__, reg_e94, reg_e9c, + reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); + path_a_ok = true; + path_b_ok = true; 
+ } else { + reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100; + reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0; + } + + if (reg_e94 && candidate >= 0) + rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result, + candidate, (reg_ea4 == 0)); + + if (priv->tx_paths > 1 && reg_eb4) + rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result, + candidate, (reg_ec4 == 0)); + + rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg, + priv->bb_recovery_backup, RTL8XXXU_BB_REGS); +} + +static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv) +{ + u32 val32; + u32 rf_amode, rf_bmode = 0, lstf; + + /* Check continuous TX and Packet TX */ + lstf = rtl8xxxu_read32(priv, REG_OFDM1_LSTF); + + if (lstf & OFDM_LSTF_MASK) { + /* Disable all continuous TX */ + val32 = lstf & ~OFDM_LSTF_MASK; + rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32); + + /* Read original RF mode Path A */ + rf_amode = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_AC); + + /* Set RF mode to standby Path A */ + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, + (rf_amode & 0x8ffff) | 0x10000); + + /* Path-B */ + if (priv->tx_paths > 1) { + rf_bmode = rtl8xxxu_read_rfreg(priv, RF_B, + RF6052_REG_AC); + + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, + (rf_bmode & 0x8ffff) | 0x10000); + } + } else { + /* Deal with Packet TX case */ + /* block all queues */ + rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); + } + + /* Start LC calibration */ + val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); + val32 |= 0x08000; + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32); + + msleep(100); + + /* Restore original parameters */ + if (lstf & OFDM_LSTF_MASK) { + /* Path-A */ + rtl8xxxu_write32(priv, REG_OFDM1_LSTF, lstf); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, rf_amode); + + /* Path-B */ + if (priv->tx_paths > 1) + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, + rf_bmode); + } else /* Deal with Packet TX case */ + rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); +} + +static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv) +{ + int i; + u16 reg; + + reg = REG_MACID; + + for (i = 0; i < ETH_ALEN; i++) + rtl8xxxu_write8(priv, reg + i, priv->mac_addr[i]); + + return 0; +} + +static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid) +{ + int i; + u16 reg; + + dev_dbg(&priv->udev->dev, "%s: (%pM)\n", __func__, bssid); + + reg = REG_BSSID; + + for (i = 0; i < ETH_ALEN; i++) + rtl8xxxu_write8(priv, reg + i, bssid[i]); + + return 0; +} + +static void +rtl8xxxu_set_ampdu_factor(struct rtl8xxxu_priv *priv, u8 ampdu_factor) +{ + u8 vals[4] = { 0x41, 0xa8, 0x72, 0xb9 }; + u8 max_agg = 0xf; + int i; + + ampdu_factor = 1 << (ampdu_factor + 2); + if (ampdu_factor > max_agg) + ampdu_factor = max_agg; + + for (i = 0; i < 4; i++) { + if ((vals[i] & 0xf0) > (ampdu_factor << 4)) + vals[i] = (vals[i] & 0x0f) | (ampdu_factor << 4); + + if ((vals[i] & 0x0f) > ampdu_factor) + vals[i] = (vals[i] & 0xf0) | ampdu_factor; + + rtl8xxxu_write8(priv, REG_AGGLEN_LMT + i, vals[i]); + } +} + +static void rtl8xxxu_set_ampdu_min_space(struct rtl8xxxu_priv *priv, u8 density) +{ + u8 val8; + + val8 = rtl8xxxu_read8(priv, REG_AMPDU_MIN_SPACE); + val8 &= 0xf8; + val8 |= density; + rtl8xxxu_write8(priv, REG_AMPDU_MIN_SPACE, val8); +} + +static int rtl8xxxu_active_to_emu(struct rtl8xxxu_priv *priv) +{ + u8 val8; + int count, ret; + + /* Start of rtl8723AU_card_enable_flow */ + /* Act to Cardemu sequence*/ + /* Turn off RF */ + rtl8xxxu_write8(priv, REG_RF_CTRL, 0); + + /* 0x004E[7] = 0, switch DPDT_SEL_P output from register 0x0065[2] */ + val8 = 
rtl8xxxu_read8(priv, REG_LEDCFG2); + val8 &= ~LEDCFG2_DPDT_SELECT; + rtl8xxxu_write8(priv, REG_LEDCFG2, val8); + + /* 0x0005[1] = 1 turn off MAC by HW state machine*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 |= BIT(1); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + if ((val8 & BIT(1)) == 0) + break; + udelay(10); + } + + if (!count) { + dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n", + __func__); + ret = -EBUSY; + goto exit; + } + + /* 0x0000[5] = 1 analog Ips to digital, 1:isolation */ + val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL); + val8 |= SYS_ISO_ANALOG_IPS; + rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8); + + /* 0x0020[0] = 0 disable LDOA12 MACRO block*/ + val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL); + val8 &= ~LDOA15_ENABLE; + rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8); + +exit: + return ret; +} + +static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u8 val32; + int count, ret; + + rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); + + /* + * Poll - wait for RX packet to complete + */ + for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { + val32 = rtl8xxxu_read32(priv, 0x5f8); + if (!val32) + break; + udelay(10); + } + + if (!count) { + dev_warn(&priv->udev->dev, + "%s: RX poll timed out (0x05f8)\n", __func__); + ret = -EBUSY; + goto exit; + } + + /* Disable CCK and OFDM, clock gated */ + val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC); + val8 &= ~SYS_FUNC_BBRSTB; + rtl8xxxu_write8(priv, REG_SYS_FUNC, val8); + + udelay(2); + + /* Reset baseband */ + val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC); + val8 &= ~SYS_FUNC_BB_GLB_RSTN; + rtl8xxxu_write8(priv, REG_SYS_FUNC, val8); + + /* Reset MAC TRX */ + val8 = rtl8xxxu_read8(priv, REG_CR); + val8 = CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE; + rtl8xxxu_write8(priv, REG_CR, val8); + + /* Reset MAC TRX */ + val8 = rtl8xxxu_read8(priv, REG_CR + 1); + val8 &= ~BIT(1); /* CR_SECURITY_ENABLE */ + rtl8xxxu_write8(priv, REG_CR + 1, val8); + + /* Respond TX OK to scheduler */ + val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST); + val8 |= DUAL_TSF_TX_OK; + rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8); + +exit: + return ret; +} + +static void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv) +{ + u8 val8; + + /* Clear suspend enable and power down enable*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 &= ~(BIT(3) | BIT(7)); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + /* 0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/ + val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2); + val8 &= ~BIT(0); + rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8); + + /* 0x04[12:11] = 11 enable WL suspend*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 &= ~(BIT(3) | BIT(4)); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); +} + +static int rtl8xxxu_emu_to_active(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u32 val32; + int count, ret = 0; + + /* 0x20[0] = 1 enable LDOA12 MACRO block for all interface*/ + val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL); + val8 |= LDOA15_ENABLE; + rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8); + + /* 0x67[0] = 0 to disable BT_GPS_SEL pins*/ + val8 = rtl8xxxu_read8(priv, 0x0067); + val8 &= ~BIT(4); + rtl8xxxu_write8(priv, 0x0067, val8); + + mdelay(1); + + /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */ + val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL); + val8 &= ~SYS_ISO_ANALOG_IPS; + rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8); + + /* disable SW LPS 0x04[10]= 0 
*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 &= ~BIT(2); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + /* wait till 0x04[17] = 1 power ready*/ + for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + if (val32 & BIT(17)) + break; + + udelay(10); + } + + if (!count) { + ret = -EBUSY; + goto exit; + } + + /* We should be able to optimize the following three entries into one */ + + /* release WLON reset 0x04[16]= 1*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2); + val8 |= BIT(0); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8); + + /* disable HWPDN 0x04[15]= 0*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 &= ~BIT(7); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + /* disable WL suspend*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 &= ~(BIT(3) | BIT(4)); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + /* set, then poll until 0 */ + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + val32 |= APS_FSMCO_MAC_ENABLE; + rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); + + for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) { + ret = 0; + break; + } + udelay(10); + } + + if (!count) { + ret = -EBUSY; + goto exit; + } + + /* 0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */ + /* + * Note: Vendor driver actually clears this bit, despite the + * documentation claims it's being set! + */ + val8 = rtl8xxxu_read8(priv, REG_LEDCFG2); + val8 |= LEDCFG2_DPDT_SELECT; + val8 &= ~LEDCFG2_DPDT_SELECT; + rtl8xxxu_write8(priv, REG_LEDCFG2, val8); + +exit: + return ret; +} + +static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv) +{ + u8 val8; + + /* 0x0007[7:0] = 0x20 SOP option to disable BG/MB */ + rtl8xxxu_write8(priv, REG_APS_FSMCO + 3, 0x20); + + /* 0x04[12:11] = 01 enable WL suspend */ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 &= ~BIT(4); + val8 |= BIT(3); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 |= BIT(7); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + /* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */ + val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2); + val8 |= BIT(0); + rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8); + + return 0; +} + +static int rtl8723au_power_on(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 val16; + u32 val32; + int ret; + + /* + * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register + */ + rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0); + + rtl8xxxu_disabled_to_emu(priv); + + ret = rtl8xxxu_emu_to_active(priv); + if (ret) + goto exit; + + /* + * 0x0004[19] = 1, reset 8051 + */ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2); + val8 |= BIT(3); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8); + + /* + * Enable MAC DMA/WMAC/SCHEDULE/SEC block + * Set CR bit10 to enable 32k calibration. 
+ */ + val16 = rtl8xxxu_read16(priv, REG_CR); + val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE | + CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | + CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE | + CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE | + CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE); + rtl8xxxu_write16(priv, REG_CR, val16); + + /* For EFuse PG */ + val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL); + val32 &= ~(BIT(28) | BIT(29) | BIT(30)); + val32 |= (0x06 << 28); + rtl8xxxu_write32(priv, REG_EFUSE_CTRL, val32); +exit: + return ret; +} + +#ifdef CONFIG_RTL8XXXU_UNTESTED + +static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 val16; + u32 val32; + int i; + + for (i = 100; i; i--) { + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO); + if (val8 & APS_FSMCO_PFM_ALDN) + break; + } + + if (!i) { + pr_info("%s: Poll failed\n", __func__); + return -ENODEV; + } + + /* + * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register + */ + rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0); + rtl8xxxu_write8(priv, REG_SPS0_CTRL, 0x2b); + udelay(100); + + val8 = rtl8xxxu_read8(priv, REG_LDOV12D_CTRL); + if (!(val8 & LDOV12D_ENABLE)) { + pr_info("%s: Enabling LDOV12D (%02x)\n", __func__, val8); + val8 |= LDOV12D_ENABLE; + rtl8xxxu_write8(priv, REG_LDOV12D_CTRL, val8); + + udelay(100); + + val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL); + val8 &= ~SYS_ISO_MD2PP; + rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8); + } + + /* + * Auto enable WLAN + */ + val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO); + val16 |= APS_FSMCO_MAC_ENABLE; + rtl8xxxu_write16(priv, REG_APS_FSMCO, val16); + + for (i = 1000; i; i--) { + val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO); + if (!(val16 & APS_FSMCO_MAC_ENABLE)) + break; + } + if (!i) { + pr_info("%s: FSMCO_MAC_ENABLE poll failed\n", __func__); + return -EBUSY; + } + + /* + * Enable radio, GPIO, LED + */ + val16 = APS_FSMCO_HW_SUSPEND | APS_FSMCO_ENABLE_POWERDOWN | + APS_FSMCO_PFM_ALDN; + rtl8xxxu_write16(priv, REG_APS_FSMCO, val16); + + /* + * Release RF digital isolation + */ + val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL); + val16 &= ~SYS_ISO_DIOR; + rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16); + + val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL); + val8 &= ~APSD_CTRL_OFF; + rtl8xxxu_write8(priv, REG_APSD_CTRL, val8); + for (i = 200; i; i--) { + val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL); + if (!(val8 & APSD_CTRL_OFF_STATUS)) + break; + } + + if (!i) { + pr_info("%s: APSD_CTRL poll failed\n", __func__); + return -EBUSY; + } + + /* + * Enable MAC DMA/WMAC/SCHEDULE/SEC block + */ + val16 = rtl8xxxu_read16(priv, REG_CR); + val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE | + CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE | + CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE; + rtl8xxxu_write16(priv, REG_CR, val16); + + /* + * Workaround for 8188RU LNA power leakage problem. + */ + if (priv->rtlchip == 0x8188c && priv->hi_pa) { + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM); + val32 &= ~BIT(1); + rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32); + } + return 0; +} + +#endif + +static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 val16; + u32 val32; + + /* + * Workaround for 8188RU LNA power leakage problem. 
+ */ + if (priv->rtlchip == 0x8188c && priv->hi_pa) { + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM); + val32 |= BIT(1); + rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32); + } + + rtl8xxxu_active_to_lps(priv); + + /* Turn off RF */ + rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00); + + /* Reset Firmware if running in RAM */ + if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL) + rtl8xxxu_firmware_self_reset(priv); + + /* Reset MCU */ + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 &= ~SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + + /* Reset MCU ready status */ + rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); + + rtl8xxxu_active_to_emu(priv); + rtl8xxxu_emu_to_disabled(priv); + + /* Reset MCU IO Wrapper */ + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); + val8 &= ~BIT(0); + rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); + val8 |= BIT(0); + rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + + /* RSV_CTRL 0x1C[7:0] = 0x0e lock ISO/CLK/Power control register */ + rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e); +} + +static void rtl8xxxu_init_bt(struct rtl8xxxu_priv *priv) +{ + if (!priv->has_bluetooth) + return; +} + +static int rtl8xxxu_init_device(struct ieee80211_hw *hw) +{ + struct rtl8xxxu_priv *priv = hw->priv; + struct device *dev = &priv->udev->dev; + struct rtl8xxxu_rfregval *rftable; + bool macpower; + int ret; + u8 val8; + u16 val16; + u32 val32; + + /* Check if MAC is already powered on */ + val8 = rtl8xxxu_read8(priv, REG_CR); + + /* + * Fix 92DU-VC S3 hang with the reason is that secondary mac is not + * initialized. First MAC returns 0xea, second MAC returns 0x00 + */ + if (val8 == 0xea) + macpower = false; + else + macpower = true; + + ret = priv->fops->power_on(priv); + if (ret < 0) { + dev_warn(dev, "%s: Failed power on\n", __func__); + goto exit; + } + + dev_dbg(dev, "%s: macpower %i\n", __func__, macpower); + if (!macpower) { + ret = rtl8xxxu_init_llt_table(priv, TX_TOTAL_PAGE_NUM); + if (ret) { + dev_warn(dev, "%s: LLT table init failed\n", __func__); + goto exit; + } + } + + ret = rtl8xxxu_download_firmware(priv); + dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret); + if (ret) + goto exit; + ret = rtl8xxxu_start_firmware(priv); + dev_dbg(dev, "%s: start_fiwmare %i\n", __func__, ret); + if (ret) + goto exit; + + ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table); + dev_dbg(dev, "%s: init_mac %i\n", __func__, ret); + if (ret) + goto exit; + + ret = rtl8xxxu_init_phy_bb(priv); + dev_dbg(dev, "%s: init_phy_bb %i\n", __func__, ret); + if (ret) + goto exit; + + switch(priv->rtlchip) { + case 0x8723a: + rftable = rtl8723au_radioa_1t_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + break; + case 0x8188c: + if (priv->hi_pa) + rftable = rtl8188ru_radioa_1t_highpa_table; + else + rftable = rtl8192cu_radioa_1t_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + break; + case 0x8191c: + rftable = rtl8192cu_radioa_1t_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + break; + case 0x8192c: + rftable = rtl8192cu_radioa_2t_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + if (ret) + break; + rftable = rtl8192cu_radiob_2t_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B); + break; + default: + ret = -EINVAL; + } + + if (ret) + goto exit; + + /* Reduce 80M spur */ + rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 
0xf0ffff82); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + + /* RFSW Control - clear bit 14 ?? */ + rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); + /* 0x07000760 */ + val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW | + FPGA0_RF_ANTSWB | FPGA0_RF_PAPE | + ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) << + FPGA0_RF_BD_CTRL_SHIFT); + rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32); + /* 0x860[6:5]= 00 - why? - this sets antenna B */ + rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210); + + priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A, + RF6052_REG_MODE_AG); + + dev_dbg(dev, "%s: macpower %i\n", __func__, macpower); + if (!macpower) { + if (priv->ep_tx_normal_queue) + val8 = TX_PAGE_NUM_NORM_PQ; + else + val8 = 0; + + rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8); + + val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD; + + if (priv->ep_tx_high_queue) + val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT); + if (priv->ep_tx_low_queue) + val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT); + + rtl8xxxu_write32(priv, REG_RQPN, val32); + + /* + * Set TX buffer boundary + */ + val8 = TX_TOTAL_PAGE_NUM + 1; + rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8); + rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8); + rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8); + rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8); + rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8); + } + + ret = rtl8xxxu_init_queue_priority(priv); + dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret); + if (ret) + goto exit; + + /* + * Set RX page boundary + */ + rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff); + /* + * Transfer page size is always 128 + */ + val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) | + (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT); + rtl8xxxu_write8(priv, REG_PBP, val8); + + /* + * Unit in 8 bytes, not obvious what it is used for + */ + rtl8xxxu_write8(priv, REG_RX_DRVINFO_SZ, 4); + + /* + * Enable all interrupts - not obvious USB needs to do this + */ + rtl8xxxu_write32(priv, REG_HISR, 0xffffffff); + rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff); + + rtl8xxxu_set_mac(priv); + rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION); + + /* + * Configure initial WMAC settings + */ + val32 = RCR_ACCEPT_PHYS_MATCH | RCR_ACCEPT_MCAST | RCR_ACCEPT_BCAST | + /* RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON | */ + RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL | + RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC; + rtl8xxxu_write32(priv, REG_RCR, val32); + + /* + * Accept all multicast + */ + rtl8xxxu_write32(priv, REG_MAR, 0xffffffff); + rtl8xxxu_write32(priv, REG_MAR + 4, 0xffffffff); + + /* + * Init adaptive controls + */ + val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET); + val32 &= ~RESPONSE_RATE_BITMAP_ALL; + val32 |= RESPONSE_RATE_RRSR_CCK_ONLY_1M; + rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32); + + /* CCK = 0x0a, OFDM = 0x10 */ + rtl8xxxu_set_spec_sifs(priv, 0x10, 0x10); + rtl8xxxu_set_retry(priv, 0x30, 0x30); + rtl8xxxu_set_spec_sifs(priv, 0x0a, 0x10); + + /* + * Init EDCA + */ + rtl8xxxu_write16(priv, REG_MAC_SPEC_SIFS, 0x100a); + + /* Set CCK SIFS */ + rtl8xxxu_write16(priv, REG_SIFS_CCK, 0x100a); + + /* Set OFDM SIFS */ + rtl8xxxu_write16(priv, REG_SIFS_OFDM, 0x100a); + + /* TXOP */ + rtl8xxxu_write32(priv, REG_EDCA_BE_PARAM, 0x005ea42b); + rtl8xxxu_write32(priv, REG_EDCA_BK_PARAM, 0x0000a44f); + rtl8xxxu_write32(priv, REG_EDCA_VI_PARAM, 0x005ea324); + rtl8xxxu_write32(priv, REG_EDCA_VO_PARAM, 0x002fa226); + + /* Set data 
auto rate fallback retry count */ + rtl8xxxu_write32(priv, REG_DARFRC, 0x00000000); + rtl8xxxu_write32(priv, REG_DARFRC + 4, 0x10080404); + rtl8xxxu_write32(priv, REG_RARFRC, 0x04030201); + rtl8xxxu_write32(priv, REG_RARFRC + 4, 0x08070605); + + val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL); + val8 |= FWHW_TXQ_CTRL_AMPDU_RETRY; + rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL, val8); + + /* Set ACK timeout */ + rtl8xxxu_write8(priv, REG_ACKTO, 0x40); + + /* + * Initialize beacon parameters + */ + val16 = BEACON_DISABLE_TSF_UPDATE | (BEACON_DISABLE_TSF_UPDATE << 8); + rtl8xxxu_write16(priv, REG_BEACON_CTRL, val16); + rtl8xxxu_write16(priv, REG_TBTT_PROHIBIT, 0x6404); + rtl8xxxu_write8(priv, REG_DRIVER_EARLY_INT, DRIVER_EARLY_INT_TIME); + rtl8xxxu_write8(priv, REG_BEACON_DMA_TIME, BEACON_DMA_ATIME_INT_TIME); + rtl8xxxu_write16(priv, REG_BEACON_TCFG, 0x660F); + + /* + * Enable CCK and OFDM block + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + val32 |= (FPGA_RF_MODE_CCK | FPGA_RF_MODE_OFDM); + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + + /* + * Invalidate all CAM entries - bit 30 is undocumented + */ + rtl8xxxu_write32(priv, REG_CAM_CMD, CAM_CMD_POLLING | BIT(30)); + + /* + * Start out with default power levels for channel 6, 20MHz + */ + rtl8723a_set_tx_power(priv, 1, false); + + /* Let the 8051 take control of antenna setting */ + val8 = rtl8xxxu_read8(priv, REG_LEDCFG2); + val8 |= LEDCFG2_DPDT_SELECT; + rtl8xxxu_write8(priv, REG_LEDCFG2, val8); + + rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, 0xff); + + /* Disable BAR - not sure if this has any effect on USB */ + rtl8xxxu_write32(priv, REG_BAR_MODE_CTRL, 0x0201ffff); + + rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0); + + /* + * Not sure if we should get into this at all + */ + if (priv->iqk_initialized) { + rtl8xxxu_restore_regs(priv, rtl8723au_iqk_phy_iq_bb_reg, + priv->bb_recovery_backup, + RTL8XXXU_BB_REGS); + } else { + rtl8723a_phy_iq_calibrate(priv); + priv->iqk_initialized = true; + } + + /* + * This should enable thermal meter + */ + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60); + + rtl8723a_phy_lc_calibrate(priv); + + /* fix USB interface interference issue */ + rtl8xxxu_write8(priv, 0xfe40, 0xe0); + rtl8xxxu_write8(priv, 0xfe41, 0x8d); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320); + + /* Solve too many protocol error on USB bus */ + /* Can't do this for 8188/8192 UMC A cut parts */ + rtl8xxxu_write8(priv, 0xfe40, 0xe6); + rtl8xxxu_write8(priv, 0xfe41, 0x94); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + + rtl8xxxu_write8(priv, 0xfe40, 0xe0); + rtl8xxxu_write8(priv, 0xfe41, 0x19); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + + rtl8xxxu_write8(priv, 0xfe40, 0xe5); + rtl8xxxu_write8(priv, 0xfe41, 0x91); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + + rtl8xxxu_write8(priv, 0xfe40, 0xe2); + rtl8xxxu_write8(priv, 0xfe41, 0x81); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + + /* Init BT hw config. 
*/ + rtl8xxxu_init_bt(priv); + + /* + * Not sure if we really need to save these parameters, but the + * vendor driver does + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2); + if (val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR) + priv->path_a_hi_power = 1; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); + priv->path_a_rf_paths = val32 & OFDM_RF_PATH_RX_MASK; + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); + priv->path_a_ig_value = val32 & OFDM0_X_AGC_CORE1_IGI_MASK; + + /* Set NAV_UPPER to 30000us */ + val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT); + rtl8xxxu_write8(priv, REG_NAV_UPPER, val8); + + /* + * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test, + * but we need to fin root cause. + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + if ((val32 & 0xff000000) != 0x83000000) { + val32 |= FPGA_RF_MODE_CCK; + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + } + + val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL); + val32 |= FWHW_TXQ_CTRL_XMIT_MGMT_ACK; + /* ack for xmit mgmt frames. */ + rtl8xxxu_write32(priv, REG_FWHW_TXQ_CTRL, val32); + +exit: + return ret; +} + +static void rtl8xxxu_disable_device(struct ieee80211_hw *hw) +{ + struct rtl8xxxu_priv *priv = hw->priv; + + rtl8xxxu_power_off(priv); +} + +static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv, + struct ieee80211_key_conf *key, const u8 *mac) +{ + u32 cmd, val32, addr, ctrl; + int j, i, tmp_debug; + + tmp_debug = rtl8xxxu_debug; + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_KEY) + rtl8xxxu_debug |= RTL8XXXU_DEBUG_REG_WRITE; + + /* + * This is a bit of a hack - the lower bits of the cipher + * suite selector happens to match the cipher index in the CAM + */ + addr = key->keyidx << CAM_CMD_KEY_SHIFT; + ctrl = (key->cipher & 0x0f) << 2 | key->keyidx | CAM_WRITE_VALID; + + for (j = 5; j >= 0; j--) { + switch (j) { + case 0: + val32 = ctrl | (mac[0] << 16) | (mac[1] << 24); + break; + case 1: + val32 = mac[2] | (mac[3] << 8) | + (mac[4] << 16) | (mac[5] << 24); + break; + default: + i = (j - 2) << 2; + val32 = key->key[i] | (key->key[i + 1] << 8) | + key->key[i + 2] << 16 | key->key[i + 3] << 24; + break; + } + + rtl8xxxu_write32(priv, REG_CAM_WRITE, val32); + cmd = CAM_CMD_POLLING | CAM_CMD_WRITE | (addr + j); + rtl8xxxu_write32(priv, REG_CAM_CMD, cmd); + udelay(100); + } + + rtl8xxxu_debug = tmp_debug; +} + +static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, const u8* mac) +{ + struct rtl8xxxu_priv *priv = hw->priv; + u8 val8; + + val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL); + val8 |= BEACON_DISABLE_TSF_UPDATE; + rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8); +} + +static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtl8xxxu_priv *priv = hw->priv; + u8 val8; + + val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL); + val8 &= ~BEACON_DISABLE_TSF_UPDATE; + rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8); +} + +static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, + u32 ramask, int sgi) +{ + struct h2c_cmd h2c; + + h2c.ramask.cmd = H2C_SET_RATE_MASK; + h2c.ramask.mask_lo = cpu_to_le16(ramask & 0xffff); + h2c.ramask.mask_hi = cpu_to_le16(ramask >> 16); + + h2c.ramask.arg = 0x80; + if (sgi) + h2c.ramask.arg |= 0x20; + + dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x\n", __func__, + ramask, h2c.ramask.arg); + rtl8723a_h2c_cmd(priv, &h2c); +} + +static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg) +{ + u32 val32; + u8 rate_idx = 0; + + rate_cfg &= 
RESPONSE_RATE_BITMAP_ALL; + + val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET); + val32 &= ~RESPONSE_RATE_BITMAP_ALL; + val32 |= rate_cfg; + rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32); + + dev_dbg(&priv->udev->dev, "%s: rates %08x\n", __func__, rate_cfg); + + while (rate_cfg) { + rate_cfg = (rate_cfg >> 1); + rate_idx++; + } + rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx); +} + +static void +rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, u32 changed) +{ + struct rtl8xxxu_priv *priv = hw->priv; + struct device *dev = &priv->udev->dev; + struct ieee80211_sta *sta; + u32 val32; + u8 val8; + + if (changed & BSS_CHANGED_ASSOC) { + struct h2c_cmd h2c; + + dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc); + + memset(&h2c, 0, sizeof(struct h2c_cmd)); + rtl8xxxu_set_linktype(priv, vif->type); + + if (bss_conf->assoc) { + u32 ramask; + int sgi = 0; + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (!sta) { + dev_info(dev, "%s: ASSOC no sta found\n", + __func__); + rcu_read_unlock(); + goto error; + } + + if (sta->ht_cap.ht_supported) + dev_info(dev, "%s: HT supported\n", __func__); + if (sta->vht_cap.vht_supported) + dev_info(dev, "%s: VHT supported\n", __func__); + + /* TODO: Set bits 28-31 for rate adaptive id */ + ramask = (sta->supp_rates[0] & 0xfff) | + sta->ht_cap.mcs.rx_mask[0] << 12 | + sta->ht_cap.mcs.rx_mask[1] << 20; + if (sta->ht_cap.cap & + (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20)) + sgi = 1; + rcu_read_unlock(); + + rtl8xxxu_update_rate_mask(priv, ramask, sgi); + + val32 = rtl8xxxu_read32(priv, REG_RCR); + val32 |= RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON; + rtl8xxxu_write32(priv, REG_RCR, val32); + + /* Enable RX of data frames */ + rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff); + + rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff); + + rtl8723a_stop_tx_beacon(priv); + + /* joinbss sequence */ + rtl8xxxu_write16(priv, REG_BCN_PSR_RPT, + 0xc000 | bss_conf->aid); + + h2c.joinbss.data = H2C_JOIN_BSS_CONNECT; + } else { + val32 = rtl8xxxu_read32(priv, REG_RCR); + val32 &= ~(RCR_CHECK_BSSID_MATCH | + RCR_CHECK_BSSID_BEACON); + rtl8xxxu_write32(priv, REG_RCR, val32); + + val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL); + val8 |= BEACON_DISABLE_TSF_UPDATE; + rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8); + + /* Disable RX of data frames */ + rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000); + h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT; + } + h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT; + rtl8723a_h2c_cmd(priv, &h2c); + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + dev_dbg(dev, "Changed ERP_PREAMBLE: Use short preamble %i\n", + bss_conf->use_short_preamble); + val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET); + if (bss_conf->use_short_preamble) + val32 |= RSR_ACK_SHORT_PREAMBLE; + else + val32 &= ~RSR_ACK_SHORT_PREAMBLE; + rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32); + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + dev_dbg(dev, "Changed ERP_SLOT: short_slot_time %i\n", + bss_conf->use_short_slot); + + if (bss_conf->use_short_slot) + val8 = 9; + else + val8 = 20; + rtl8xxxu_write8(priv, REG_SLOT, val8); + } + + if (changed & BSS_CHANGED_BSSID) { + dev_dbg(dev, "Changed BSSID!\n"); + rtl8xxxu_set_bssid(priv, bss_conf->bssid); + } + + if (changed & BSS_CHANGED_BASIC_RATES) { + dev_dbg(dev, "Changed BASIC_RATES!\n"); + rtl8xxxu_set_basic_rates(priv, bss_conf->basic_rates); + } +error: + return; +} + +static u32 rtl8xxxu_80211_to_rtl_queue(u32 queue) +{ + u32 
rtlqueue; + + switch (queue) { + case IEEE80211_AC_VO: + rtlqueue = TXDESC_QUEUE_VO; + break; + case IEEE80211_AC_VI: + rtlqueue = TXDESC_QUEUE_VI; + break; + case IEEE80211_AC_BE: + rtlqueue = TXDESC_QUEUE_BE; + break; + case IEEE80211_AC_BK: + rtlqueue = TXDESC_QUEUE_BK; + break; + default: + rtlqueue = TXDESC_QUEUE_BE; + } + + return rtlqueue; +} + +static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + u32 queue; + + if (ieee80211_is_mgmt(hdr->frame_control)) + queue = TXDESC_QUEUE_MGNT; + else + queue = rtl8xxxu_80211_to_rtl_queue(skb_get_queue_mapping(skb)); + + return queue; +} + +static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_tx_desc *tx_desc) +{ + __le16 *ptr = (__le16 *)tx_desc; + u16 csum = 0; + int i; + + /* + * Clear csum field before calculation, as the csum field is + * in the middle of the struct. + */ + tx_desc->csum = cpu_to_le16(0); + + for (i = 0; i < (sizeof(struct rtl8xxxu_tx_desc) / sizeof(u16)); i++) + csum = csum ^ le16_to_cpu(ptr[i]); + + tx_desc->csum |= cpu_to_le16(csum); +} + +static void rtl8xxxu_free_tx_resources(struct rtl8xxxu_priv *priv) +{ + struct rtl8xxxu_tx_urb *tx_urb, *tmp; + unsigned long flags; + + spin_lock_irqsave(&priv->tx_urb_lock, flags); + list_for_each_entry_safe(tx_urb, tmp, &priv->tx_urb_free_list, list) { + list_del(&tx_urb->list); + priv->tx_urb_free_count--; + usb_free_urb(&tx_urb->urb); + } + spin_unlock_irqrestore(&priv->tx_urb_lock, flags); +} + +static struct rtl8xxxu_tx_urb * +rtl8xxxu_alloc_tx_urb(struct rtl8xxxu_priv *priv) +{ + struct rtl8xxxu_tx_urb *tx_urb; + unsigned long flags; + + spin_lock_irqsave(&priv->tx_urb_lock, flags); + tx_urb = list_first_entry_or_null(&priv->tx_urb_free_list, + struct rtl8xxxu_tx_urb, list); + if (tx_urb) { + list_del(&tx_urb->list); + priv->tx_urb_free_count--; + if (priv->tx_urb_free_count < RTL8XXXU_TX_URB_LOW_WATER && + !priv->tx_stopped) { + priv->tx_stopped = true; + ieee80211_stop_queues(priv->hw); + } + } + + spin_unlock_irqrestore(&priv->tx_urb_lock, flags); + + return tx_urb; +} + +static void rtl8xxxu_free_tx_urb(struct rtl8xxxu_priv *priv, + struct rtl8xxxu_tx_urb *tx_urb) +{ + unsigned long flags; + + INIT_LIST_HEAD(&tx_urb->list); + + spin_lock_irqsave(&priv->tx_urb_lock, flags); + + list_add(&tx_urb->list, &priv->tx_urb_free_list); + priv->tx_urb_free_count++; + if (priv->tx_urb_free_count > RTL8XXXU_TX_URB_HIGH_WATER && + priv->tx_stopped) { + priv->tx_stopped = false; + ieee80211_wake_queues(priv->hw); + } + + spin_unlock_irqrestore(&priv->tx_urb_lock, flags); +} + +static void rtl8xxxu_tx_complete(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *)urb->context; + struct ieee80211_tx_info *tx_info; + struct ieee80211_hw *hw; + struct rtl8xxxu_tx_urb *tx_urb = + container_of(urb, struct rtl8xxxu_tx_urb, urb); + + tx_info = IEEE80211_SKB_CB(skb); + hw = tx_info->rate_driver_data[0]; + + skb_pull(skb, sizeof(struct rtl8xxxu_tx_desc)); + + ieee80211_tx_info_clear_status(tx_info); + tx_info->status.rates[0].idx = -1; + tx_info->status.rates[0].count = 0; + + if (!urb->status) + tx_info->flags |= IEEE80211_TX_STAT_ACK; + + ieee80211_tx_status_irqsafe(hw, skb); + + rtl8xxxu_free_tx_urb(hw->priv, tx_urb); +} + +static void rtl8xxxu_dump_action(struct device *dev, + struct ieee80211_hdr *hdr) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr; + u16 cap, timeout; + + if (!(rtl8xxxu_debug & RTL8XXXU_DEBUG_ACTION)) + return; + + switch 
(mgmt->u.action.u.addba_resp.action_code) { + case WLAN_ACTION_ADDBA_RESP: + cap = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); + timeout = le16_to_cpu(mgmt->u.action.u.addba_resp.timeout); + dev_info(dev, "WLAN_ACTION_ADDBA_RESP: " + "timeout %i, tid %02x, buf_size %02x, policy %02x, " + "status %02x\n", + timeout, + (cap & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2, + (cap & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6, + (cap >> 1) & 0x1, + le16_to_cpu(mgmt->u.action.u.addba_resp.status)); + break; + case WLAN_ACTION_ADDBA_REQ: + cap = le16_to_cpu(mgmt->u.action.u.addba_req.capab); + timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); + dev_info(dev, "WLAN_ACTION_ADDBA_REQ: " + "timeout %i, tid %02x, buf_size %02x, policy %02x\n", + timeout, + (cap & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2, + (cap & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6, + (cap >> 1) & 0x1); + break; + default: + dev_info(dev, "action frame %02x\n", + mgmt->u.action.u.addba_resp.action_code); + break; + } +} + +static void rtl8xxxu_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info); + struct rtl8xxxu_priv *priv = hw->priv; + struct rtl8xxxu_tx_desc *tx_desc; + struct rtl8xxxu_tx_urb *tx_urb; + struct ieee80211_sta *sta = NULL; + struct ieee80211_vif *vif = tx_info->control.vif; + struct device *dev = &priv->udev->dev; + u32 queue, rate; + u16 pktlen = skb->len; + u16 seq_number; + u16 rate_flag = tx_info->control.rates[0].flags; + int ret; + + if (skb_headroom(skb) < sizeof(struct rtl8xxxu_tx_desc)) { + dev_warn(dev, + "%s: Not enough headroom (%i) for tx descriptor\n", + __func__, skb_headroom(skb)); + goto error; + } + + if (unlikely(skb->len > (65535 - sizeof(struct rtl8xxxu_tx_desc)))) { + dev_warn(dev, "%s: Trying to send over-sized skb (%i)\n", + __func__, skb->len); + goto error; + } + + tx_urb = rtl8xxxu_alloc_tx_urb(priv); + if (!tx_urb) { + dev_warn(dev, "%s: Unable to allocate tx urb\n", __func__); + goto error; + } + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX) + dev_info(dev, "%s: TX rate: %d (%d), pkt size %d\n", + __func__, tx_rate->bitrate, tx_rate->hw_value, pktlen); + + if (ieee80211_is_action(hdr->frame_control)) + rtl8xxxu_dump_action(dev, hdr); + + tx_info->rate_driver_data[0] = hw; + + if (control && control->sta) + sta = control->sta; + + tx_desc = (struct rtl8xxxu_tx_desc *) + skb_push(skb, sizeof(struct rtl8xxxu_tx_desc)); + + memset(tx_desc, 0, sizeof(struct rtl8xxxu_tx_desc)); + tx_desc->pkt_size = cpu_to_le16(pktlen); + tx_desc->pkt_offset = sizeof(struct rtl8xxxu_tx_desc); + + tx_desc->txdw0 = + TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT; + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || + is_broadcast_ether_addr(ieee80211_get_DA(hdr))) + tx_desc->txdw0 |= TXDESC_BROADMULTICAST; + + queue = rtl8xxxu_queue_select(hw, skb); + tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT); + + if (tx_info->control.hw_key) { + switch (tx_info->control.hw_key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + tx_desc->txdw1 |= cpu_to_le32(TXDESC_SEC_RC4); + break; + case WLAN_CIPHER_SUITE_CCMP: + tx_desc->txdw1 |= cpu_to_le32(TXDESC_SEC_AES); + break; + default: + break; + } + } + + seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + tx_desc->txdw3 = cpu_to_le32((u32)seq_number << 
TXDESC_SEQ_SHIFT); + + if (rate_flag & IEEE80211_TX_RC_MCS) + rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0; + else + rate = tx_rate->hw_value; + tx_desc->txdw5 = cpu_to_le32(rate); + + if (ieee80211_is_data(hdr->frame_control)) + tx_desc->txdw5 |= cpu_to_le32(0x0001ff00); + + /* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */ + if (ieee80211_is_data_qos(hdr->frame_control) && sta) { + if (sta->ht_cap.ht_supported) { + u32 ampdu, val32; + + ampdu = (u32)sta->ht_cap.ampdu_density; + val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT; + tx_desc->txdw2 |= cpu_to_le32(val32); + tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE); + } else + tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK); + } else + tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK); + + if (ieee80211_is_data_qos(hdr->frame_control)) + tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS); + if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE || + (sta && vif && vif->bss_conf.use_short_preamble)) + tx_desc->txdw4 |= cpu_to_le32(TXDESC_SHORT_PREAMBLE); + if (rate_flag & IEEE80211_TX_RC_SHORT_GI || + (ieee80211_is_data_qos(hdr->frame_control) && + sta && sta->ht_cap.cap & + (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) { + tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI); + } + if (ieee80211_is_mgmt(hdr->frame_control)) { + tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value); + tx_desc->txdw4 |= cpu_to_le32(TXDESC_USE_DRIVER_RATE); + tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC_RETRY_LIMIT_SHIFT); + tx_desc->txdw5 |= cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE); + } + + if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { + /* Use RTS rate 24M - does the mac80211 tell us which to use? */ + tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M); + tx_desc->txdw4 |= cpu_to_le32(TXDESC_RTS_CTS_ENABLE); + tx_desc->txdw4 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE); + } + + rtl8xxxu_calc_tx_desc_csum(tx_desc); + + usb_fill_bulk_urb(&tx_urb->urb, priv->udev, priv->pipe_out[queue], + skb->data, skb->len, rtl8xxxu_tx_complete, skb); + + usb_anchor_urb(&tx_urb->urb, &priv->tx_anchor); + ret = usb_submit_urb(&tx_urb->urb, GFP_ATOMIC); + if (ret) { + usb_unanchor_urb(&tx_urb->urb); + rtl8xxxu_free_tx_urb(priv, tx_urb); + goto error; + } + return; +error: + dev_kfree_skb(skb); +} + +static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv, + struct ieee80211_rx_status *rx_status, + struct rtl8xxxu_rx_desc *rx_desc, + struct rtl8723au_phy_stats *phy_stats) +{ + if (phy_stats->sgi_en) + rx_status->flag |= RX_FLAG_SHORT_GI; + + if (rx_desc->rxmcs < DESC_RATE_6M) { + /* + * Handle PHY stats for CCK rates + */ + u8 cck_agc_rpt = phy_stats->cck_agc_rpt_ofdm_cfosho_a; + + switch (cck_agc_rpt & 0xc0) { + case 0xc0: + rx_status->signal = -46 - (cck_agc_rpt & 0x3e); + break; + case 0x80: + rx_status->signal = -26 - (cck_agc_rpt & 0x3e); + break; + case 0x40: + rx_status->signal = -12 - (cck_agc_rpt & 0x3e); + break; + case 0x00: + rx_status->signal = 16 - (cck_agc_rpt & 0x3e); + break; + } + } else { + rx_status->signal = + (phy_stats->cck_sig_qual_ofdm_pwdb_all >> 1) - 110; + } +} + +static void rtl8xxxu_free_rx_resources(struct rtl8xxxu_priv *priv) +{ + struct rtl8xxxu_rx_urb *rx_urb, *tmp; + unsigned long flags; + + spin_lock_irqsave(&priv->rx_urb_lock, flags); + + list_for_each_entry_safe(rx_urb, tmp, + &priv->rx_urb_pending_list, list) { + list_del(&rx_urb->list); + priv->rx_urb_pending_count--; + usb_free_urb(&rx_urb->urb); + } + + spin_unlock_irqrestore(&priv->rx_urb_lock, flags); +} + +static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv, + struct rtl8xxxu_rx_urb *rx_urb) +{ + struct 
sk_buff *skb; + unsigned long flags; + int pending = 0; + + spin_lock_irqsave(&priv->rx_urb_lock, flags); + + if (!priv->shutdown) { + list_add_tail(&rx_urb->list, &priv->rx_urb_pending_list); + priv->rx_urb_pending_count++; + pending = priv->rx_urb_pending_count; + } else { + skb = (struct sk_buff *)rx_urb->urb.context; + dev_kfree_skb(skb); + usb_free_urb(&rx_urb->urb); + } + + spin_unlock_irqrestore(&priv->rx_urb_lock, flags); + + if (pending > RTL8XXXU_RX_URB_PENDING_WATER) + schedule_work(&priv->rx_urb_wq); +} + +static void rtl8xxxu_rx_urb_work(struct work_struct *work) +{ + struct rtl8xxxu_priv *priv; + struct rtl8xxxu_rx_urb *rx_urb, *tmp; + struct list_head local; + struct sk_buff *skb; + unsigned long flags; + int ret; + + priv = container_of(work, struct rtl8xxxu_priv, rx_urb_wq); + INIT_LIST_HEAD(&local); + + spin_lock_irqsave(&priv->rx_urb_lock, flags); + + list_splice_init(&priv->rx_urb_pending_list, &local); + priv->rx_urb_pending_count = 0; + + spin_unlock_irqrestore(&priv->rx_urb_lock, flags); + + list_for_each_entry_safe(rx_urb, tmp, &local, list) { + list_del_init(&rx_urb->list); + ret = rtl8xxxu_submit_rx_urb(priv, rx_urb); + /* + * If out of memory or temporary error, put it back on the + * queue and try again. Otherwise the device is dead/gone + * and we should drop it. + */ + switch (ret) { + case 0: + break; + case -ENOMEM: + case -EAGAIN: + rtl8xxxu_queue_rx_urb(priv, rx_urb); + break; + default: + pr_info("failed to requeue urb %i\n", ret); + skb = (struct sk_buff *)rx_urb->urb.context; + dev_kfree_skb(skb); + usb_free_urb(&rx_urb->urb); + } + } +} + +static void rtl8xxxu_rx_complete(struct urb *urb) +{ + struct rtl8xxxu_rx_urb *rx_urb = + container_of(urb, struct rtl8xxxu_rx_urb, urb); + struct ieee80211_hw *hw = rx_urb->hw; + struct rtl8xxxu_priv *priv = hw->priv; + struct sk_buff *skb = (struct sk_buff *)urb->context; + struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data; + struct rtl8723au_phy_stats *phy_stats; + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_mgmt *mgmt; + struct device *dev = &priv->udev->dev; + __le32 *_rx_desc_le = (__le32 *)skb->data; + u32 *_rx_desc = (u32 *)skb->data; + int cnt, len, drvinfo_sz, desc_shift, i; + + for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++) + _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); + + cnt = rx_desc->frag; + len = rx_desc->pktlen; + drvinfo_sz = rx_desc->drvinfo_sz * 8; + desc_shift = rx_desc->shift; + skb_put(skb, urb->actual_length); + + if (urb->status == 0) { + skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc)); + phy_stats = (struct rtl8723au_phy_stats *)skb->data; + + skb_pull(skb, drvinfo_sz + desc_shift); + + mgmt = (struct ieee80211_mgmt *)skb->data; + + memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); + + if (rx_desc->phy_stats) + rtl8xxxu_rx_parse_phystats(priv, rx_status, + rx_desc, phy_stats); + + rx_status->freq = hw->conf.chandef.chan->center_freq; + rx_status->band = hw->conf.chandef.chan->band; + + rx_status->mactime = le32_to_cpu(rx_desc->tsfl); + rx_status->flag |= RX_FLAG_MACTIME_START; + + if (!rx_desc->swdec) + rx_status->flag |= RX_FLAG_DECRYPTED; + if (rx_desc->crc32) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (rx_desc->bw) + rx_status->flag |= RX_FLAG_40MHZ; + + if (rx_desc->rxht) { + rx_status->flag |= RX_FLAG_HT; + rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0; + } else { + rx_status->rate_idx = rx_desc->rxmcs; + } + + ieee80211_rx_irqsafe(hw, skb); + skb = NULL; + rx_urb->urb.context = 
NULL; + rtl8xxxu_queue_rx_urb(priv, rx_urb); + } else { + dev_dbg(dev, "%s: status %i\n", __func__, urb->status); + goto cleanup; + } + return; + +cleanup: + usb_free_urb(urb); + dev_kfree_skb(skb); + return; +} + +static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv, + struct rtl8xxxu_rx_urb *rx_urb) +{ + struct sk_buff *skb; + int skb_size; + int ret; + + skb_size = sizeof(struct rtl8xxxu_rx_desc) + RTL_RX_BUFFER_SIZE; + skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + memset(skb->data, 0, sizeof(struct rtl8xxxu_rx_desc)); + usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data, + skb_size, rtl8xxxu_rx_complete, skb); + usb_anchor_urb(&rx_urb->urb, &priv->rx_anchor); + ret = usb_submit_urb(&rx_urb->urb, GFP_ATOMIC); + if (ret) + usb_unanchor_urb(&rx_urb->urb); + return ret; +} + +static void rtl8xxxu_int_complete(struct urb *urb) +{ + struct rtl8xxxu_priv *priv = (struct rtl8xxxu_priv *)urb->context; + struct device *dev = &priv->udev->dev; + int ret; + + dev_dbg(dev, "%s: status %i\n", __func__, urb->status); + if (urb->status == 0) { + usb_anchor_urb(urb, &priv->int_anchor); + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret) + usb_unanchor_urb(urb); + } else { + dev_info(dev, "%s: Error %i\n", __func__, urb->status); + } +} + + +static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw) +{ + struct rtl8xxxu_priv *priv = hw->priv; + struct urb *urb; + u32 val32; + int ret; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return -ENOMEM; + + usb_fill_int_urb(urb, priv->udev, priv->pipe_interrupt, + priv->int_buf, USB_INTR_CONTENT_LENGTH, + rtl8xxxu_int_complete, priv, 1); + usb_anchor_urb(urb, &priv->int_anchor); + ret = usb_submit_urb(urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(urb); + goto error; + } + + val32 = rtl8xxxu_read32(priv, REG_USB_HIMR); + val32 |= USB_HIMR_CPWM; + rtl8xxxu_write32(priv, REG_USB_HIMR, val32); + +error: + return ret; +} + +static int rtl8xxxu_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtl8xxxu_priv *priv = hw->priv; + int ret; + u8 val8; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + rtl8723a_stop_tx_beacon(priv); + + val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL); + val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE | + BEACON_DISABLE_TSF_UPDATE; + rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8); + ret = 0; + break; + default: + ret = -EOPNOTSUPP; + } + + rtl8xxxu_set_linktype(priv, vif->type); + + return ret; +} + +static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtl8xxxu_priv *priv = hw->priv; + + dev_dbg(&priv->udev->dev, "%s\n", __func__); +} + +static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed) +{ + struct rtl8xxxu_priv *priv = hw->priv; + struct device *dev = &priv->udev->dev; + u16 val16; + int ret = 0, channel; + bool ht40; + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_CHANNEL) + dev_info(dev, + "%s: channel: %i (changed %08x chandef.width %02x)\n", + __func__, hw->conf.chandef.chan->hw_value, + changed, hw->conf.chandef.width); + + if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { + val16 = ((hw->conf.long_frame_max_tx_count << + RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) | + ((hw->conf.short_frame_max_tx_count << + RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK); + rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16); + } + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + switch (hw->conf.chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: 
+ ht40 = false; + break; + case NL80211_CHAN_WIDTH_40: + ht40 = true; + break; + default: + ret = -ENOTSUPP; + goto exit; + } + + channel = hw->conf.chandef.chan->hw_value; + + rtl8723a_set_tx_power(priv, channel, ht40); + + rtl8723au_config_channel(hw); + } + +exit: + return ret; +} + +static int rtl8xxxu_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *param) +{ + struct rtl8xxxu_priv *priv = hw->priv; + struct device *dev = &priv->udev->dev; + u32 val32; + u8 aifs, acm_ctrl, acm_bit; + + aifs = param->aifs; + + val32 = aifs | + fls(param->cw_min) << EDCA_PARAM_ECW_MIN_SHIFT | + fls(param->cw_max) << EDCA_PARAM_ECW_MAX_SHIFT | + (u32)param->txop << EDCA_PARAM_TXOP_SHIFT; + + acm_ctrl = rtl8xxxu_read8(priv, REG_ACM_HW_CTRL); + dev_dbg(dev, + "%s: IEEE80211 queue %02x val %08x, acm %i, acm_ctrl %02x\n", + __func__, queue, val32, param->acm, acm_ctrl); + + switch (queue) { + case IEEE80211_AC_VO: + acm_bit = ACM_HW_CTRL_VO; + rtl8xxxu_write32(priv, REG_EDCA_VO_PARAM, val32); + break; + case IEEE80211_AC_VI: + acm_bit = ACM_HW_CTRL_VI; + rtl8xxxu_write32(priv, REG_EDCA_VI_PARAM, val32); + break; + case IEEE80211_AC_BE: + acm_bit = ACM_HW_CTRL_BE; + rtl8xxxu_write32(priv, REG_EDCA_BE_PARAM, val32); + break; + case IEEE80211_AC_BK: + acm_bit = ACM_HW_CTRL_BK; + rtl8xxxu_write32(priv, REG_EDCA_BK_PARAM, val32); + break; + default: + acm_bit = 0; + break; + } + + if (param->acm) + acm_ctrl |= acm_bit; + else + acm_ctrl &= ~acm_bit; + rtl8xxxu_write8(priv, REG_ACM_HW_CTRL, acm_ctrl); + + return 0; +} + +static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct rtl8xxxu_priv *priv = hw->priv; + + dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n", + __func__, changed_flags, *total_flags); + + *total_flags &= (FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC); +} + +static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts) +{ + if (rts > 2347) + return -EINVAL; + + return 0; +} + +static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct rtl8xxxu_priv *priv = hw->priv; + struct device *dev = &priv->udev->dev; + u8 mac_addr[ETH_ALEN]; + u8 val8; + u16 val16; + u32 val32; + int retval = -EOPNOTSUPP; + + dev_dbg(dev, "%s: cmd %02x, cipher %08x, index %i\n", + __func__, cmd, key->cipher, key->keyidx); + + if (vif->type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (key->keyidx > 3) + return -EOPNOTSUPP; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + + break; + case WLAN_CIPHER_SUITE_CCMP: + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + break; + case WLAN_CIPHER_SUITE_TKIP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + default: + return -EOPNOTSUPP; + } + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + dev_dbg(dev, "%s: pairwise key\n", __func__); + ether_addr_copy(mac_addr, sta->addr); + } else { + dev_dbg(dev, "%s: group key\n", __func__); + eth_broadcast_addr(mac_addr); + } + + val16 = rtl8xxxu_read16(priv, REG_CR); + val16 |= CR_SECURITY_ENABLE; + rtl8xxxu_write16(priv, REG_CR, val16); + + val8 = SEC_CFG_TX_SEC_ENABLE | SEC_CFG_TXBC_USE_DEFKEY | + SEC_CFG_RX_SEC_ENABLE | SEC_CFG_RXBC_USE_DEFKEY; + val8 |= SEC_CFG_TX_USE_DEFKEY | SEC_CFG_RX_USE_DEFKEY; + rtl8xxxu_write8(priv, REG_SECURITY_CFG, val8); + + switch (cmd) { + 
case SET_KEY: + key->hw_key_idx = key->keyidx; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + rtl8xxxu_cam_write(priv, key, mac_addr); + retval = 0; + break; + case DISABLE_KEY: + rtl8xxxu_write32(priv, REG_CAM_WRITE, 0x00000000); + val32 = CAM_CMD_POLLING | CAM_CMD_WRITE | + key->keyidx << CAM_CMD_KEY_SHIFT; + rtl8xxxu_write32(priv, REG_CAM_CMD, val32); + retval = 0; + break; + default: + dev_warn(dev, "%s: Unsupported command %02x\n", __func__, cmd); + } + + return retval; +} + +static int +rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size, + bool amsdu) +{ + struct rtl8xxxu_priv *priv = hw->priv; + struct device *dev = &priv->udev->dev; + u8 ampdu_factor, ampdu_density; + + switch (action) { + case IEEE80211_AMPDU_TX_START: + dev_info(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__); + ampdu_factor = sta->ht_cap.ampdu_factor; + ampdu_density = sta->ht_cap.ampdu_density; + rtl8xxxu_set_ampdu_factor(priv, ampdu_factor); + rtl8xxxu_set_ampdu_min_space(priv, ampdu_density); + dev_dbg(dev, + "Changed HT: ampdu_factor %02x, ampdu_density %02x\n", + ampdu_factor, ampdu_density); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__); + rtl8xxxu_set_ampdu_factor(priv, 0); + rtl8xxxu_set_ampdu_min_space(priv, 0); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n", + __func__); + rtl8xxxu_set_ampdu_factor(priv, 0); + rtl8xxxu_set_ampdu_min_space(priv, 0); + break; + case IEEE80211_AMPDU_RX_START: + dev_info(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__); + break; + case IEEE80211_AMPDU_RX_STOP: + dev_info(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__); + break; + default: + break; + } + return 0; +} + +static int rtl8xxxu_start(struct ieee80211_hw *hw) +{ + struct rtl8xxxu_priv *priv = hw->priv; + struct rtl8xxxu_rx_urb *rx_urb; + struct rtl8xxxu_tx_urb *tx_urb; + unsigned long flags; + int ret, i; + + ret = 0; + + init_usb_anchor(&priv->rx_anchor); + init_usb_anchor(&priv->tx_anchor); + init_usb_anchor(&priv->int_anchor); + + rtl8723a_enable_rf(priv); + ret = rtl8xxxu_submit_int_urb(hw); + if (ret) + goto exit; + + for (i = 0; i < RTL8XXXU_TX_URBS; i++) { + tx_urb = kmalloc(sizeof(struct rtl8xxxu_tx_urb), GFP_KERNEL); + if (!tx_urb) { + if (!i) + ret = -ENOMEM; + + goto error_out; + } + usb_init_urb(&tx_urb->urb); + INIT_LIST_HEAD(&tx_urb->list); + tx_urb->hw = hw; + list_add(&tx_urb->list, &priv->tx_urb_free_list); + priv->tx_urb_free_count++; + } + + priv->tx_stopped = false; + + spin_lock_irqsave(&priv->rx_urb_lock, flags); + priv->shutdown = false; + spin_unlock_irqrestore(&priv->rx_urb_lock, flags); + + for (i = 0; i < RTL8XXXU_RX_URBS; i++) { + rx_urb = kmalloc(sizeof(struct rtl8xxxu_rx_urb), GFP_KERNEL); + if (!rx_urb) { + if (!i) + ret = -ENOMEM; + + goto error_out; + } + usb_init_urb(&rx_urb->urb); + INIT_LIST_HEAD(&rx_urb->list); + rx_urb->hw = hw; + + ret = rtl8xxxu_submit_rx_urb(priv, rx_urb); + } +exit: + /* + * Disable all data frames + */ + rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000); + /* + * Accept all mgmt frames + */ + rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff); + + rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e); + + return ret; + +error_out: + rtl8xxxu_free_tx_resources(priv); + /* + * Disable all data and mgmt frames + */ + rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000); + rtl8xxxu_write16(priv, REG_RXFLTMAP0, 
0x0000); + + return ret; +} + +static void rtl8xxxu_stop(struct ieee80211_hw *hw) +{ + struct rtl8xxxu_priv *priv = hw->priv; + unsigned long flags; + + rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); + + rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0x0000); + rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000); + + spin_lock_irqsave(&priv->rx_urb_lock, flags); + priv->shutdown = true; + spin_unlock_irqrestore(&priv->rx_urb_lock, flags); + + usb_kill_anchored_urbs(&priv->rx_anchor); + usb_kill_anchored_urbs(&priv->tx_anchor); + usb_kill_anchored_urbs(&priv->int_anchor); + + rtl8723a_disable_rf(priv); + + /* + * Disable interrupts + */ + rtl8xxxu_write32(priv, REG_USB_HIMR, 0); + + rtl8xxxu_free_rx_resources(priv); + rtl8xxxu_free_tx_resources(priv); +} + +static const struct ieee80211_ops rtl8xxxu_ops = { + .tx = rtl8xxxu_tx, + .add_interface = rtl8xxxu_add_interface, + .remove_interface = rtl8xxxu_remove_interface, + .config = rtl8xxxu_config, + .conf_tx = rtl8xxxu_conf_tx, + .bss_info_changed = rtl8xxxu_bss_info_changed, + .configure_filter = rtl8xxxu_configure_filter, + .set_rts_threshold = rtl8xxxu_set_rts_threshold, + .start = rtl8xxxu_start, + .stop = rtl8xxxu_stop, + .sw_scan_start = rtl8xxxu_sw_scan_start, + .sw_scan_complete = rtl8xxxu_sw_scan_complete, + .set_key = rtl8xxxu_set_key, + .ampdu_action = rtl8xxxu_ampdu_action, +}; + +static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv, + struct usb_interface *interface) +{ + struct usb_interface_descriptor *interface_desc; + struct usb_host_interface *host_interface; + struct usb_endpoint_descriptor *endpoint; + struct device *dev = &priv->udev->dev; + int i, j = 0, endpoints; + u8 dir, xtype, num; + int ret = 0; + + host_interface = &interface->altsetting[0]; + interface_desc = &host_interface->desc; + endpoints = interface_desc->bNumEndpoints; + + for (i = 0; i < endpoints; i++) { + endpoint = &host_interface->endpoint[i].desc; + + dir = endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK; + num = usb_endpoint_num(endpoint); + xtype = usb_endpoint_type(endpoint); + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB) + dev_dbg(dev, + "%s: endpoint: dir %02x, # %02x, type %02x\n", + __func__, dir, num, xtype); + if (usb_endpoint_dir_in(endpoint) && + usb_endpoint_xfer_bulk(endpoint)) { + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB) + dev_dbg(dev, "%s: in endpoint num %i\n", + __func__, num); + + if (priv->pipe_in) { + dev_warn(dev, + "%s: Too many IN pipes\n", __func__); + ret = -EINVAL; + goto exit; + } + + priv->pipe_in = usb_rcvbulkpipe(priv->udev, num); + } + + if (usb_endpoint_dir_in(endpoint) && + usb_endpoint_xfer_int(endpoint)) { + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB) + dev_dbg(dev, "%s: interrupt endpoint num %i\n", + __func__, num); + + if (priv->pipe_interrupt) { + dev_warn(dev, "%s: Too many INTERRUPT pipes\n", + __func__); + ret = -EINVAL; + goto exit; + } + + priv->pipe_interrupt = usb_rcvintpipe(priv->udev, num); + } + + if (usb_endpoint_dir_out(endpoint) && + usb_endpoint_xfer_bulk(endpoint)) { + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB) + dev_dbg(dev, "%s: out endpoint num %i\n", + __func__, num); + if (j >= RTL8XXXU_OUT_ENDPOINTS) { + dev_warn(dev, + "%s: Too many OUT pipes\n", __func__); + ret = -EINVAL; + goto exit; + } + priv->out_ep[j++] = num; + } + } +exit: + priv->nr_out_eps = j; + return ret; +} + +static int rtl8xxxu_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct rtl8xxxu_priv *priv; + struct ieee80211_hw *hw; + struct usb_device *udev; + struct ieee80211_supported_band *sband; + int ret = 0; + 
int untested = 1; + + udev = usb_get_dev(interface_to_usbdev(interface)); + + switch (id->idVendor) { + case USB_VENDOR_ID_REALTEK: + switch(id->idProduct) { + case 0x1724: + case 0x8176: + case 0x8178: + case 0x817f: + untested = 0; + break; + } + break; + case 0x7392: + if (id->idProduct == 0x7811) + untested = 0; + break; + default: + break; + } + + if (untested) { + rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE; + dev_info(&udev->dev, + "This Realtek USB WiFi dongle (0x%04x:0x%04x) is untested!\n", + id->idVendor, id->idProduct); + dev_info(&udev->dev, + "Please report results to Jes.Sorensen@gmail.com\n"); + } + + hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops); + if (!hw) { + ret = -ENOMEM; + goto exit; + } + + priv = hw->priv; + priv->hw = hw; + priv->udev = udev; + priv->fops = (struct rtl8xxxu_fileops *)id->driver_info; + mutex_init(&priv->usb_buf_mutex); + mutex_init(&priv->h2c_mutex); + INIT_LIST_HEAD(&priv->tx_urb_free_list); + spin_lock_init(&priv->tx_urb_lock); + INIT_LIST_HEAD(&priv->rx_urb_pending_list); + spin_lock_init(&priv->rx_urb_lock); + INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work); + + usb_set_intfdata(interface, hw); + + ret = rtl8xxxu_parse_usb(priv, interface); + if (ret) + goto exit; + + ret = rtl8xxxu_identify_chip(priv); + if (ret) { + dev_err(&udev->dev, "Fatal - failed to identify chip\n"); + goto exit; + } + + ret = rtl8xxxu_read_efuse(priv); + if (ret) { + dev_err(&udev->dev, "Fatal - failed to read EFuse\n"); + goto exit; + } + + ret = priv->fops->parse_efuse(priv); + if (ret) { + dev_err(&udev->dev, "Fatal - failed to parse EFuse\n"); + goto exit; + } + + rtl8xxxu_print_chipinfo(priv); + + ret = priv->fops->load_firmware(priv); + if (ret) { + dev_err(&udev->dev, "Fatal - failed to load firmware\n"); + goto exit; + } + + ret = rtl8xxxu_init_device(hw); + + hw->wiphy->max_scan_ssids = 1; + hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + hw->queues = 4; + + sband = &rtl8xxxu_supported_band; + sband->ht_cap.ht_supported = true; + sband->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + sband->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; + sband->ht_cap.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40; + memset(&sband->ht_cap.mcs, 0, sizeof(sband->ht_cap.mcs)); + sband->ht_cap.mcs.rx_mask[0] = 0xff; + sband->ht_cap.mcs.rx_mask[4] = 0x01; + if (priv->rf_paths > 1) { + sband->ht_cap.mcs.rx_mask[1] = 0xff; + sband->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + } + sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + /* + * Some APs will negotiate HT20_40 in a noisy environment leading + * to miserable performance. Rather than defaulting to this, only + * enable it if explicitly requested at module load time. 
+ */ + if (rtl8xxxu_ht40_2g) { + dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n"); + sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + } + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + + hw->wiphy->rts_threshold = 2347; + + SET_IEEE80211_DEV(priv->hw, &interface->dev); + SET_IEEE80211_PERM_ADDR(hw, priv->mac_addr); + + hw->extra_tx_headroom = sizeof(struct rtl8xxxu_tx_desc); + ieee80211_hw_set(hw, SIGNAL_DBM); + /* + * The firmware handles rate control + */ + ieee80211_hw_set(hw, HAS_RATE_CONTROL); + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + dev_err(&udev->dev, "%s: Failed to register: %i\n", + __func__, ret); + goto exit; + } + +exit: + if (ret < 0) + usb_put_dev(udev); + return ret; +} + +static void rtl8xxxu_disconnect(struct usb_interface *interface) +{ + struct rtl8xxxu_priv *priv; + struct ieee80211_hw *hw; + + hw = usb_get_intfdata(interface); + priv = hw->priv; + + rtl8xxxu_disable_device(hw); + usb_set_intfdata(interface, NULL); + + dev_info(&priv->udev->dev, "disconnecting\n"); + + ieee80211_unregister_hw(hw); + + kfree(priv->fw_data); + mutex_destroy(&priv->usb_buf_mutex); + mutex_destroy(&priv->h2c_mutex); + + usb_put_dev(priv->udev); + ieee80211_free_hw(hw); +} + +static struct rtl8xxxu_fileops rtl8723au_fops = { + .parse_efuse = rtl8723au_parse_efuse, + .load_firmware = rtl8723au_load_firmware, + .power_on = rtl8723au_power_on, + .writeN_block_size = 1024, +}; + +#ifdef CONFIG_RTL8XXXU_UNTESTED + +static struct rtl8xxxu_fileops rtl8192cu_fops = { + .parse_efuse = rtl8192cu_parse_efuse, + .load_firmware = rtl8192cu_load_firmware, + .power_on = rtl8192cu_power_on, + .writeN_block_size = 128, +}; + +#endif + +static struct usb_device_id dev_table[] = { +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8723au_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1724, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8723au_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x0724, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8723au_fops}, +#ifdef CONFIG_RTL8XXXU_UNTESTED +/* Still supported by rtlwifi */ +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8178, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817f, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +/* Tested by Larry Finger */ +{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +/* Currently untested 8188 series devices */ +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8170, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8177, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817a, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817b, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817d, 0xff, 
0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x1058, 0x0631, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x094c, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x5088, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0052, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x005c, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0eb0, 0x9071, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x103c, 0x1629, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2a, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2e, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xed17, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0090, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, /* Netcore 8188RU */ +{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff), + .driver_info = (unsigned 
long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff9, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffa, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff8, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffb, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffc, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x1201, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +/* Currently untested 8192 series devices */ +{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0950, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2102, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2103, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0061, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8178, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9021, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0xf001, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x2e2e, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0019, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0020, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3307, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3309, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0100, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0091, 0xff, 0xff, 0xff), + 
.driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7822, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +#endif +{ } +}; + +static struct usb_driver rtl8xxxu_driver = { + .name = DRIVER_NAME, + .probe = rtl8xxxu_probe, + .disconnect = rtl8xxxu_disconnect, + .id_table = dev_table, + .disable_hub_initiated_lpm = 1, +}; + +static int __init rtl8xxxu_module_init(void) +{ + int res; + + res = usb_register(&rtl8xxxu_driver); + if (res < 0) + pr_err(DRIVER_NAME ": usb_register() failed (%i)\n", res); + + return res; +} + +static void __exit rtl8xxxu_module_exit(void) +{ + usb_deregister(&rtl8xxxu_driver); +} + + +MODULE_DEVICE_TABLE(usb, dev_table); + +module_init(rtl8xxxu_module_init); +module_exit(rtl8xxxu_module_exit); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h new file mode 100644 index 000000000000..f2a1bac6c8ec --- /dev/null +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -0,0 +1,676 @@ +/* + * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * Register definitions taken from original Realtek rtl8723au driver + */ + +#include <asm/byteorder.h> + +#define RTL8XXXU_DEBUG_REG_WRITE 0x01 +#define RTL8XXXU_DEBUG_REG_READ 0x02 +#define RTL8XXXU_DEBUG_RFREG_WRITE 0x04 +#define RTL8XXXU_DEBUG_RFREG_READ 0x08 +#define RTL8XXXU_DEBUG_CHANNEL 0x10 +#define RTL8XXXU_DEBUG_TX 0x20 +#define RTL8XXXU_DEBUG_TX_DUMP 0x40 +#define RTL8XXXU_DEBUG_RX 0x80 +#define RTL8XXXU_DEBUG_RX_DUMP 0x100 +#define RTL8XXXU_DEBUG_USB 0x200 +#define RTL8XXXU_DEBUG_KEY 0x400 +#define RTL8XXXU_DEBUG_H2C 0x800 +#define RTL8XXXU_DEBUG_ACTION 0x1000 +#define RTL8XXXU_DEBUG_EFUSE 0x2000 + +#define RTW_USB_CONTROL_MSG_TIMEOUT 500 +#define RTL8XXXU_MAX_REG_POLL 500 +#define USB_INTR_CONTENT_LENGTH 56 + +#define RTL8XXXU_OUT_ENDPOINTS 3 + +#define REALTEK_USB_READ 0xc0 +#define REALTEK_USB_WRITE 0x40 +#define REALTEK_USB_CMD_REQ 0x05 +#define REALTEK_USB_CMD_IDX 0x00 + +#define TX_TOTAL_PAGE_NUM 0xf8 +/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */ +#define TX_PAGE_NUM_PUBQ 0xe7 +#define TX_PAGE_NUM_HI_PQ 0x0c +#define TX_PAGE_NUM_LO_PQ 0x02 +#define TX_PAGE_NUM_NORM_PQ 0x02 + +#define RTL_FW_PAGE_SIZE 4096 +#define RTL8XXXU_FIRMWARE_POLL_MAX 1000 + +#define RTL8723A_CHANNEL_GROUPS 3 +#define RTL8723A_MAX_RF_PATHS 2 +#define RF6052_MAX_TX_PWR 0x3f + +#define EFUSE_MAP_LEN_8723A 256 +#define EFUSE_MAX_SECTION_8723A 32 +#define EFUSE_REAL_CONTENT_LEN_8723A 512 +#define EFUSE_BT_MAP_LEN_8723A 1024 +#define EFUSE_MAX_WORD_UNIT 4 + +struct rtl8xxxu_rx_desc { +#ifdef __LITTLE_ENDIAN + u32 pktlen:14; + u32 crc32:1; + u32 icverr:1; + u32 drvinfo_sz:4; + u32 security:3; + u32 qos:1; + u32 shift:2; + u32 phy_stats:1; + u32 swdec:1; + u32 ls:1; + u32 fs:1; + u32 eor:1; + u32 own:1; + + u32 macid:5; + u32 tid:4; + u32 hwrsvd:4; + u32 amsdu:1; + u32 paggr:1; + u32 faggr:1; + u32 a1fit:4; + u32 a2fit:4; + u32 pam:1; + u32 pwr:1; + u32 md:1; + u32 mf:1; + u32 type:2; + u32 mc:1; + u32 bc:1; + + u32 seq:12; + u32 frag:4; + u32 nextpktlen:14; + 
u32 nextind:1; + u32 reserved0:1; + + u32 rxmcs:6; + u32 rxht:1; + u32 gf:1; + u32 splcp:1; + u32 bw:1; + u32 htc:1; + u32 eosp:1; + u32 bssidfit:2; + u32 reserved1:16; + u32 unicastwake:1; + u32 magicwake:1; + + u32 pattern0match:1; + u32 pattern1match:1; + u32 pattern2match:1; + u32 pattern3match:1; + u32 pattern4match:1; + u32 pattern5match:1; + u32 pattern6match:1; + u32 pattern7match:1; + u32 pattern8match:1; + u32 pattern9match:1; + u32 patternamatch:1; + u32 patternbmatch:1; + u32 patterncmatch:1; + u32 reserved2:19; +#else + u32 own:1; + u32 eor:1; + u32 fs:1; + u32 ls:1; + u32 swdec:1; + u32 phy_stats:1; + u32 shift:2; + u32 qos:1; + u32 security:3; + u32 drvinfo_sz:4; + u32 icverr:1; + u32 crc32:1; + u32 pktlen:14; + + u32 bc:1; + u32 mc:1; + u32 type:2; + u32 mf:1; + u32 md:1; + u32 pwr:1; + u32 pam:1; + u32 a2fit:4; + u32 a1fit:4; + u32 faggr:1; + u32 paggr:1; + u32 amsdu:1; + u32 hwrsvd:4; + u32 tid:4; + u32 macid:5; + + u32 reserved0:1; + u32 nextind:1; + u32 nextpktlen:14; + u32 frag:4; + u32 seq:12; + + u32 magicwake:1; + u32 unicastwake:1; + u32 reserved1:16; + u32 bssidfit:2; + u32 eosp:1; + u32 htc:1; + u32 bw:1; + u32 splcp:1; + u32 gf:1; + u32 rxht:1; + u32 rxmcs:6; + + u32 reserved2:19; + u32 patterncmatch:1; + u32 patternbmatch:1; + u32 patternamatch:1; + u32 pattern9match:1; + u32 pattern8match:1; + u32 pattern7match:1; + u32 pattern6match:1; + u32 pattern5match:1; + u32 pattern4match:1; + u32 pattern3match:1; + u32 pattern2match:1; + u32 pattern1match:1; + u32 pattern0match:1; +#endif + __le32 tsfl; +#if 0 + u32 bassn:12; + u32 bavld:1; + u32 reserved3:19; +#endif +}; + +struct rtl8xxxu_tx_desc { + __le16 pkt_size; + u8 pkt_offset; + u8 txdw0; + __le32 txdw1; + __le32 txdw2; + __le32 txdw3; + __le32 txdw4; + __le32 txdw5; + __le32 txdw6; + __le16 csum; + __le16 txdw7; +}; + +/* CCK Rates, TxHT = 0 */ +#define DESC_RATE_1M 0x00 +#define DESC_RATE_2M 0x01 +#define DESC_RATE_5_5M 0x02 +#define DESC_RATE_11M 0x03 + +/* OFDM Rates, TxHT = 0 */ +#define DESC_RATE_6M 0x04 +#define DESC_RATE_9M 0x05 +#define DESC_RATE_12M 0x06 +#define DESC_RATE_18M 0x07 +#define DESC_RATE_24M 0x08 +#define DESC_RATE_36M 0x09 +#define DESC_RATE_48M 0x0a +#define DESC_RATE_54M 0x0b + +/* MCS Rates, TxHT = 1 */ +#define DESC_RATE_MCS0 0x0c +#define DESC_RATE_MCS1 0x0d +#define DESC_RATE_MCS2 0x0e +#define DESC_RATE_MCS3 0x0f +#define DESC_RATE_MCS4 0x10 +#define DESC_RATE_MCS5 0x11 +#define DESC_RATE_MCS6 0x12 +#define DESC_RATE_MCS7 0x13 +#define DESC_RATE_MCS8 0x14 +#define DESC_RATE_MCS9 0x15 +#define DESC_RATE_MCS10 0x16 +#define DESC_RATE_MCS11 0x17 +#define DESC_RATE_MCS12 0x18 +#define DESC_RATE_MCS13 0x19 +#define DESC_RATE_MCS14 0x1a +#define DESC_RATE_MCS15 0x1b +#define DESC_RATE_MCS15_SG 0x1c +#define DESC_RATE_MCS32 0x20 + +#define TXDESC_OFFSET_SZ 0 +#define TXDESC_OFFSET_SHT 16 +#if 0 +#define TXDESC_BMC BIT(24) +#define TXDESC_LSG BIT(26) +#define TXDESC_FSG BIT(27) +#define TXDESC_OWN BIT(31) +#else +#define TXDESC_BROADMULTICAST BIT(0) +#define TXDESC_LAST_SEGMENT BIT(2) +#define TXDESC_FIRST_SEGMENT BIT(3) +#define TXDESC_OWN BIT(7) +#endif + +/* Word 1 */ +#define TXDESC_PKT_OFFSET_SZ 0 +#define TXDESC_AGG_ENABLE BIT(5) +#define TXDESC_BK BIT(6) +#define TXDESC_QUEUE_SHIFT 8 +#define TXDESC_QUEUE_MASK 0x1f00 +#define TXDESC_QUEUE_BK 0x2 +#define TXDESC_QUEUE_BE 0x0 +#define TXDESC_QUEUE_VI 0x5 +#define TXDESC_QUEUE_VO 0x7 +#define TXDESC_QUEUE_BEACON 0x10 +#define TXDESC_QUEUE_HIGH 0x11 +#define TXDESC_QUEUE_MGNT 0x12 +#define TXDESC_QUEUE_CMD 0x13 +#define 
TXDESC_QUEUE_MAX (TXDESC_QUEUE_CMD + 1) + +#define DESC_RATE_ID_SHIFT 16 +#define DESC_RATE_ID_MASK 0xf +#define TXDESC_NAVUSEHDR BIT(20) +#define TXDESC_SEC_RC4 0x00400000 +#define TXDESC_SEC_AES 0x00c00000 +#define TXDESC_PKT_OFFSET_SHIFT 26 +#define TXDESC_AGG_EN BIT(29) +#define TXDESC_HWPC BIT(31) + +/* Word 2 */ +#define TXDESC_ACK_REPORT BIT(19) +#define TXDESC_AMPDU_DENSITY_SHIFT 20 + +/* Word 3 */ +#define TXDESC_SEQ_SHIFT 16 +#define TXDESC_SEQ_MASK 0x0fff0000 + +/* Word 4 */ +#define TXDESC_QOS BIT(6) +#define TXDESC_HW_SEQ_ENABLE BIT(7) +#define TXDESC_USE_DRIVER_RATE BIT(8) +#define TXDESC_DISABLE_DATA_FB BIT(10) +#define TXDESC_CTS_SELF_ENABLE BIT(11) +#define TXDESC_RTS_CTS_ENABLE BIT(12) +#define TXDESC_HW_RTS_ENABLE BIT(13) +#define TXDESC_PRIME_CH_OFF_LOWER BIT(20) +#define TXDESC_PRIME_CH_OFF_UPPER BIT(21) +#define TXDESC_SHORT_PREAMBLE BIT(24) +#define TXDESC_DATA_BW BIT(25) +#define TXDESC_RTS_DATA_BW BIT(27) +#define TXDESC_RTS_PRIME_CH_OFF_LOWER BIT(28) +#define TXDESC_RTS_PRIME_CH_OFF_UPPER BIT(29) + +/* Word 5 */ +#define TXDESC_RTS_RATE_SHIFT 0 +#define TXDESC_RTS_RATE_MASK 0x3f +#define TXDESC_SHORT_GI BIT(6) +#define TXDESC_CCX_TAG BIT(7) +#define TXDESC_RETRY_LIMIT_ENABLE BIT(17) +#define TXDESC_RETRY_LIMIT_SHIFT 18 +#define TXDESC_RETRY_LIMIT_MASK 0x00fc0000 + +/* Word 6 */ +#define TXDESC_MAX_AGG_SHIFT 11 + +struct phy_rx_agc_info { +#ifdef __LITTLE_ENDIAN + u8 gain:7, trsw:1; +#else + u8 trsw:1, gain:7; +#endif +}; + +struct rtl8723au_phy_stats { + struct phy_rx_agc_info path_agc[RTL8723A_MAX_RF_PATHS]; + u8 ch_corr[RTL8723A_MAX_RF_PATHS]; + u8 cck_sig_qual_ofdm_pwdb_all; + u8 cck_agc_rpt_ofdm_cfosho_a; + u8 cck_rpt_b_ofdm_cfosho_b; + u8 reserved_1; + u8 noise_power_db_msb; + u8 path_cfotail[RTL8723A_MAX_RF_PATHS]; + u8 pcts_mask[RTL8723A_MAX_RF_PATHS]; + s8 stream_rxevm[RTL8723A_MAX_RF_PATHS]; + u8 path_rxsnr[RTL8723A_MAX_RF_PATHS]; + u8 noise_power_db_lsb; + u8 reserved_2[3]; + u8 stream_csi[RTL8723A_MAX_RF_PATHS]; + u8 stream_target_csi[RTL8723A_MAX_RF_PATHS]; + s8 sig_evm; + u8 reserved_3; + +#ifdef __LITTLE_ENDIAN + u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */ + u8 sgi_en:1; + u8 rxsc:2; + u8 idle_long:1; + u8 r_ant_train_en:1; + u8 antenna_select_b:1; + u8 antenna_select:1; +#else /* _BIG_ENDIAN_ */ + u8 antenna_select:1; + u8 antenna_select_b:1; + u8 r_ant_train_en:1; + u8 idle_long:1; + u8 rxsc:2; + u8 sgi_en:1; + u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */ +#endif +}; + +/* + * Regs to backup + */ +#define RTL8XXXU_ADDA_REGS 16 +#define RTL8XXXU_MAC_REGS 4 +#define RTL8XXXU_BB_REGS 9 + +struct rtl8xxxu_firmware_header { + __le16 signature; /* 92C0: test chip; 92C, + 88C0: test chip; + 88C1: MP A-cut; + 92C1: MP A-cut */ + u8 category; /* AP/NIC and USB/PCI */ + u8 function; + + __le16 major_version; /* FW Version */ + u8 minor_version; /* FW Subversion, default 0x00 */ + u8 reserved1; + + u8 month; /* Release time Month field */ + u8 date; /* Release time Date field */ + u8 hour; /* Release time Hour field */ + u8 minute; /* Release time Minute field */ + + __le16 ramcodesize; /* Size of RAM code */ + u16 reserved2; + + __le32 svn_idx; /* SVN entry index */ + u32 reserved3; + + u32 reserved4; + u32 reserved5; + + u8 data[0]; +}; + +/* + * The 8723au has 3 channel groups: 1-3, 4-9, and 10-14 + */ +struct rtl8723au_idx { +#ifdef __LITTLE_ENDIAN + int a:4; + int b:4; +#else + int b:4; + int a:4; +#endif +} __attribute__((packed)); + +struct rtl8723au_efuse { + __le16 rtl_id; + u8 res0[0xe]; + u8 cck_tx_power_index_A[3]; /* 0x10 */ + u8 
cck_tx_power_index_B[3]; + u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */ + u8 ht40_1s_tx_power_index_B[3]; + /* + * The following entries are half-bytes split as: + * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed + */ + struct rtl8723au_idx ht20_tx_power_index_diff[3]; + struct rtl8723au_idx ofdm_tx_power_index_diff[3]; + struct rtl8723au_idx ht40_max_power_offset[3]; + struct rtl8723au_idx ht20_max_power_offset[3]; + u8 channel_plan; /* 0x28 */ + u8 tssi_a; + u8 thermal_meter; + u8 rf_regulatory; + u8 rf_option_2; + u8 rf_option_3; + u8 rf_option_4; + u8 res7; + u8 version /* 0x30 */; + u8 customer_id_major; + u8 customer_id_minor; + u8 xtal_k; + u8 chipset; /* 0x34 */ + u8 res8[0x82]; + u8 vid; /* 0xb7 */ + u8 res9; + u8 pid; /* 0xb9 */ + u8 res10[0x0c]; + u8 mac_addr[ETH_ALEN]; /* 0xc6 */ + u8 res11[2]; + u8 vendor_name[7]; + u8 res12[2]; + u8 device_name[0x29]; /* 0xd7 */ +}; + +struct rtl8192cu_efuse { + __le16 rtl_id; + __le16 hpon; + u8 res0[2]; + __le16 clk; + __le16 testr; + __le16 vid; + __le16 did; + __le16 svid; + __le16 smid; /* 0x10 */ + u8 res1[4]; + u8 mac_addr[ETH_ALEN]; /* 0x16 */ + u8 res2[2]; + u8 vendor_name[7]; + u8 res3[3]; + u8 device_name[0x14]; /* 0x28 */ + u8 res4[0x1e]; /* 0x3c */ + u8 cck_tx_power_index_A[3]; /* 0x5a */ + u8 cck_tx_power_index_B[3]; + u8 ht40_1s_tx_power_index_A[3]; /* 0x60 */ + u8 ht40_1s_tx_power_index_B[3]; + /* + * The following entries are half-bytes split as: + * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed + */ + struct rtl8723au_idx ht40_2s_tx_power_index_diff[3]; + struct rtl8723au_idx ht20_tx_power_index_diff[3]; /* 0x69 */ + struct rtl8723au_idx ofdm_tx_power_index_diff[3]; + struct rtl8723au_idx ht40_max_power_offset[3]; /* 0x6f */ + struct rtl8723au_idx ht20_max_power_offset[3]; + u8 channel_plan; /* 0x75 */ + u8 tssi_a; + u8 tssi_b; + u8 thermal_meter; /* xtal_k */ /* 0x78 */ + u8 rf_regulatory; + u8 rf_option_2; + u8 rf_option_3; + u8 rf_option_4; + u8 res5[1]; /* 0x7d */ + u8 version; + u8 customer_id; +}; + +struct rtl8xxxu_reg8val { + u16 reg; + u8 val; +}; + +struct rtl8xxxu_reg32val { + u16 reg; + u32 val; +}; + +struct rtl8xxxu_rfregval { + u8 reg; + u32 val; +}; + +enum rtl8xxxu_rfpath { + RF_A = 0, + RF_B = 1, +}; + +struct rtl8xxxu_rfregs { + u16 hssiparm1; + u16 hssiparm2; + u16 lssiparm; + u16 hspiread; + u16 lssiread; + u16 rf_sw_ctrl; +}; + +#define H2C_MAX_MBOX 4 +#define H2C_EXT BIT(7) +#define H2C_SET_POWER_MODE 1 +#define H2C_JOIN_BSS_REPORT 2 +#define H2C_JOIN_BSS_DISCONNECT 0 +#define H2C_JOIN_BSS_CONNECT 1 +#define H2C_SET_RSSI 5 +#define H2C_SET_RATE_MASK (6 | H2C_EXT) + +struct h2c_cmd { + union { + struct { + u8 cmd; + u8 data[5]; + } __packed cmd; + struct { + __le32 data; + __le16 ext; + } __packed raw; + struct { + u8 cmd; + u8 data; + u8 pad[4]; + } __packed joinbss; + struct { + u8 cmd; + __le16 mask_hi; + u8 arg; + __le16 mask_lo; + } __packed ramask; + }; +}; + +struct rtl8xxxu_fileops; + +struct rtl8xxxu_priv { + struct ieee80211_hw *hw; + struct usb_device *udev; + struct rtl8xxxu_fileops *fops; + + spinlock_t tx_urb_lock; + struct list_head tx_urb_free_list; + int tx_urb_free_count; + bool tx_stopped; + + spinlock_t rx_urb_lock; + struct list_head rx_urb_pending_list; + int rx_urb_pending_count; + bool shutdown; + struct work_struct rx_urb_wq; + + u8 mac_addr[ETH_ALEN]; + char chip_name[8]; + u8 cck_tx_power_index_A[3]; /* 0x10 */ + u8 cck_tx_power_index_B[3]; + u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */ + u8 ht40_1s_tx_power_index_B[3]; + /* + * The following entries 
are half-bytes split as: + * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed + */ + struct rtl8723au_idx ht40_2s_tx_power_index_diff[3]; + struct rtl8723au_idx ht20_tx_power_index_diff[3]; + struct rtl8723au_idx ofdm_tx_power_index_diff[3]; + struct rtl8723au_idx ht40_max_power_offset[3]; + struct rtl8723au_idx ht20_max_power_offset[3]; + u32 chip_cut:4; + u32 rom_rev:4; + u32 has_wifi:1; + u32 has_bluetooth:1; + u32 enable_bluetooth:1; + u32 has_gps:1; + u32 hi_pa:1; + u32 vendor_umc:1; + u32 has_polarity_ctrl:1; + u32 has_eeprom:1; + u32 boot_eeprom:1; + u32 ep_tx_high_queue:1; + u32 ep_tx_normal_queue:1; + u32 ep_tx_low_queue:1; + u32 path_a_hi_power:1; + u32 path_a_rf_paths:4; + unsigned int pipe_interrupt; + unsigned int pipe_in; + unsigned int pipe_out[TXDESC_QUEUE_MAX]; + u8 out_ep[RTL8XXXU_OUT_ENDPOINTS]; + u8 path_a_ig_value; + u8 ep_tx_count; + u8 rf_paths; + u8 rx_paths; + u8 tx_paths; + u32 rf_mode_ag[2]; + u32 rege94; + u32 rege9c; + u32 regeb4; + u32 regebc; + int next_mbox; + int nr_out_eps; + + struct mutex h2c_mutex; + + struct usb_anchor rx_anchor; + struct usb_anchor tx_anchor; + struct usb_anchor int_anchor; + struct rtl8xxxu_firmware_header *fw_data; + size_t fw_size; + struct mutex usb_buf_mutex; + union { + __le32 val32; + __le16 val16; + u8 val8; + } usb_buf; + union { + u8 raw[EFUSE_MAP_LEN_8723A]; + struct rtl8723au_efuse efuse8723; + struct rtl8192cu_efuse efuse8192; + } efuse_wifi; + u32 adda_backup[RTL8XXXU_ADDA_REGS]; + u32 mac_backup[RTL8XXXU_MAC_REGS]; + u32 bb_backup[RTL8XXXU_BB_REGS]; + u32 bb_recovery_backup[RTL8XXXU_BB_REGS]; + u32 rtlchip; + u8 pi_enabled:1; + u8 iqk_initialized:1; + u8 int_buf[USB_INTR_CONTENT_LENGTH]; +}; + +struct rtl8xxxu_rx_urb { + struct urb urb; + struct ieee80211_hw *hw; + struct list_head list; +}; + +struct rtl8xxxu_tx_urb { + struct urb urb; + struct ieee80211_hw *hw; + struct list_head list; +}; + +struct rtl8xxxu_fileops { + int (*parse_efuse) (struct rtl8xxxu_priv *priv); + int (*load_firmware) (struct rtl8xxxu_priv *priv); + int (*power_on) (struct rtl8xxxu_priv *priv); + int writeN_block_size; +}; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h new file mode 100644 index 000000000000..23208f79b97c --- /dev/null +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -0,0 +1,981 @@ +/* + * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * Register definitions taken from original Realtek rtl8723au driver + */ + +/* 0x0000 ~ 0x00FF System Configuration */ +#define REG_SYS_ISO_CTRL 0x0000 +#define SYS_ISO_MD2PP BIT(0) +#define SYS_ISO_ANALOG_IPS BIT(5) +#define SYS_ISO_DIOR BIT(9) +#define SYS_ISO_PWC_EV25V BIT(14) +#define SYS_ISO_PWC_EV12V BIT(15) + +#define REG_SYS_FUNC 0x0002 +#define SYS_FUNC_BBRSTB BIT(0) +#define SYS_FUNC_BB_GLB_RSTN BIT(1) +#define SYS_FUNC_USBA BIT(2) +#define SYS_FUNC_UPLL BIT(3) +#define SYS_FUNC_USBD BIT(4) +#define SYS_FUNC_DIO_PCIE BIT(5) +#define SYS_FUNC_PCIEA BIT(6) +#define SYS_FUNC_PPLL BIT(7) +#define SYS_FUNC_PCIED BIT(8) +#define SYS_FUNC_DIOE BIT(9) +#define SYS_FUNC_CPU_ENABLE BIT(10) +#define SYS_FUNC_DCORE BIT(11) +#define SYS_FUNC_ELDR BIT(12) +#define SYS_FUNC_DIO_RF BIT(13) +#define SYS_FUNC_HWPDN BIT(14) +#define SYS_FUNC_MREGEN BIT(15) + +#define REG_APS_FSMCO 0x0004 +#define APS_FSMCO_PFM_ALDN BIT(1) +#define APS_FSMCO_PFM_WOWL BIT(3) +#define APS_FSMCO_ENABLE_POWERDOWN BIT(4) +#define APS_FSMCO_MAC_ENABLE BIT(8) +#define APS_FSMCO_MAC_OFF BIT(9) +#define APS_FSMCO_HW_SUSPEND BIT(11) +#define APS_FSMCO_PCIE BIT(12) +#define APS_FSMCO_HW_POWERDOWN BIT(15) +#define APS_FSMCO_WLON_RESET BIT(16) + +#define REG_SYS_CLKR 0x0008 +#define SYS_CLK_ANAD16V_ENABLE BIT(0) +#define SYS_CLK_ANA8M BIT(1) +#define SYS_CLK_MACSLP BIT(4) +#define SYS_CLK_LOADER_ENABLE BIT(5) +#define SYS_CLK_80M_SSC_DISABLE BIT(7) +#define SYS_CLK_80M_SSC_ENABLE_HO BIT(8) +#define SYS_CLK_PHY_SSC_RSTB BIT(9) +#define SYS_CLK_SEC_CLK_ENABLE BIT(10) +#define SYS_CLK_MAC_CLK_ENABLE BIT(11) +#define SYS_CLK_ENABLE BIT(12) +#define SYS_CLK_RING_CLK_ENABLE BIT(13) + +#define REG_9346CR 0x000a +#define EEPROM_BOOT BIT(4) +#define EEPROM_ENABLE BIT(5) + +#define REG_EE_VPD 0x000c +#define REG_AFE_MISC 0x0010 +#define REG_SPS0_CTRL 0x0011 +#define REG_SPS_OCP_CFG 0x0018 +#define REG_RSV_CTRL 0x001c + +#define REG_RF_CTRL 0x001f +#define RF_ENABLE BIT(0) +#define RF_RSTB BIT(1) +#define RF_SDMRSTB BIT(2) + +#define REG_LDOA15_CTRL 0x0020 +#define LDOA15_ENABLE BIT(0) +#define LDOA15_STANDBY BIT(1) +#define LDOA15_OBUF BIT(2) +#define LDOA15_REG_VOS BIT(3) +#define LDOA15_VOADJ_SHIFT 4 + +#define REG_LDOV12D_CTRL 0x0021 +#define LDOV12D_ENABLE BIT(0) +#define LDOV12D_STANDBY BIT(1) +#define LDOV12D_VADJ_SHIFT 4 + +#define REG_LDOHCI12_CTRL 0x0022 + +#define REG_LPLDO_CTRL 0x0023 +#define LPLDO_HSM BIT(2) +#define LPLDO_LSM_DIS BIT(3) + +#define REG_AFE_XTAL_CTRL 0x0024 +#define AFE_XTAL_ENABLE BIT(0) +#define AFE_XTAL_B_SELECT BIT(1) +#define AFE_XTAL_GATE_USB BIT(8) +#define AFE_XTAL_GATE_AFE BIT(11) +#define AFE_XTAL_RF_GATE BIT(14) +#define AFE_XTAL_GATE_DIG BIT(17) +#define AFE_XTAL_BT_GATE BIT(20) + +#define REG_AFE_PLL_CTRL 0x0028 +#define AFE_PLL_ENABLE BIT(0) +#define AFE_PLL_320_ENABLE BIT(1) +#define APE_PLL_FREF_SELECT BIT(2) +#define AFE_PLL_EDGE_SELECT BIT(3) +#define AFE_PLL_WDOGB BIT(4) +#define AFE_PLL_LPF_ENABLE BIT(5) + +#define REG_MAC_PHY_CTRL 0x002c + +#define REG_EFUSE_CTRL 0x0030 +#define REG_EFUSE_TEST 0x0034 +#define EFUSE_TRPT BIT(7) + /* 00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */ +#define EFUSE_CELL_SEL (BIT(8) | BIT(9)) +#define EFUSE_LDOE25_ENABLE BIT(31) +#define EFUSE_SELECT_MASK 0x0300 +#define EFUSE_WIFI_SELECT 0x0000 +#define EFUSE_BT0_SELECT 0x0100 +#define EFUSE_BT1_SELECT 0x0200 +#define EFUSE_BT2_SELECT 0x0300 + +#define EFUSE_ACCESS_ENABLE 0x69 /* RTL8723 only */ +#define EFUSE_ACCESS_DISABLE 0x00 /* RTL8723 only */ + +#define REG_PWR_DATA 0x0038 +#define 
REG_CAL_TIMER 0x003c +#define REG_ACLK_MON 0x003e +#define REG_GPIO_MUXCFG 0x0040 +#define REG_GPIO_IO_SEL 0x0042 +#define REG_MAC_PINMUX_CFG 0x0043 +#define REG_GPIO_PIN_CTRL 0x0044 +#define REG_GPIO_INTM 0x0048 +#define REG_LEDCFG0 0x004c +#define REG_LEDCFG1 0x004d +#define REG_LEDCFG2 0x004e +#define LEDCFG2_DPDT_SELECT BIT(7) +#define REG_LEDCFG3 0x004f +#define REG_LEDCFG REG_LEDCFG2 +#define REG_FSIMR 0x0050 +#define REG_FSISR 0x0054 +#define REG_HSIMR 0x0058 +#define REG_HSISR 0x005c +/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */ +#define REG_GPIO_PIN_CTRL_2 0x0060 +/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */ +#define REG_GPIO_IO_SEL_2 0x0062 + +/* RTL8723 only WIFI/BT/GPS Multi-Function control source. */ +#define REG_MULTI_FUNC_CTRL 0x0068 + +#define MULTI_FN_WIFI_HW_PWRDOWN_EN BIT(0) /* Enable GPIO[9] as WiFi HW + powerdown source */ +#define MULTI_FN_WIFI_HW_PWRDOWN_SL BIT(1) /* WiFi HW powerdown polarity + control */ +#define MULTI_WIFI_FUNC_EN BIT(2) /* WiFi function enable */ + +#define MULTI_WIFI_HW_ROF_EN BIT(3) /* Enable GPIO[9] as WiFi RF HW + powerdown source */ +#define MULTI_BT_HW_PWRDOWN_EN BIT(16) /* Enable GPIO[11] as BT HW + powerdown source */ +#define MULTI_BT_HW_PWRDOWN_SL BIT(17) /* BT HW powerdown polarity + control */ +#define MULTI_BT_FUNC_EN BIT(18) /* BT function enable */ +#define MULTI_BT_HW_ROF_EN BIT(19) /* Enable GPIO[11] as BT/GPS + RF HW powerdown source */ +#define MULTI_GPS_HW_PWRDOWN_EN BIT(20) /* Enable GPIO[10] as GPS HW + powerdown source */ +#define MULTI_GPS_HW_PWRDOWN_SL BIT(21) /* GPS HW powerdown polarity + control */ +#define MULTI_GPS_FUNC_EN BIT(22) /* GPS function enable */ + +#define REG_MCU_FW_DL 0x0080 +#define MCU_FW_DL_ENABLE BIT(0) +#define MCU_FW_DL_READY BIT(1) +#define MCU_FW_DL_CSUM_REPORT BIT(2) +#define MCU_MAC_INIT_READY BIT(3) +#define MCU_BB_INIT_READY BIT(4) +#define MCU_RF_INIT_READY BIT(5) +#define MCU_WINT_INIT_READY BIT(6) +#define MCU_FW_RAM_SEL BIT(7) /* 1: RAM, 0:ROM */ +#define MCU_CP_RESET BIT(23) + +#define REG_HMBOX_EXT_0 0x0088 +#define REG_HMBOX_EXT_1 0x008a +#define REG_HMBOX_EXT_2 0x008c +#define REG_HMBOX_EXT_3 0x008e +/* Host suspend counter on FPGA platform */ +#define REG_HOST_SUSP_CNT 0x00bc +/* Efuse access protection for RTL8723 */ +#define REG_EFUSE_ACCESS 0x00cf +#define REG_BIST_SCAN 0x00d0 +#define REG_BIST_RPT 0x00d4 +#define REG_BIST_ROM_RPT 0x00d8 +#define REG_USB_SIE_INTF 0x00e0 +#define REG_PCIE_MIO_INTF 0x00e4 +#define REG_PCIE_MIO_INTD 0x00e8 +#define REG_HPON_FSM 0x00ec +#define HPON_FSM_BONDING_MASK (BIT(22) | BIT(23)) +#define HPON_FSM_BONDING_1T2R BIT(22) +#define REG_SYS_CFG 0x00f0 +#define SYS_CFG_XCLK_VLD BIT(0) +#define SYS_CFG_ACLK_VLD BIT(1) +#define SYS_CFG_UCLK_VLD BIT(2) +#define SYS_CFG_PCLK_VLD BIT(3) +#define SYS_CFG_PCIRSTB BIT(4) +#define SYS_CFG_V15_VLD BIT(5) +#define SYS_CFG_TRP_B15V_EN BIT(7) +#define SYS_CFG_SIC_IDLE BIT(8) +#define SYS_CFG_BD_MAC2 BIT(9) +#define SYS_CFG_BD_MAC1 BIT(10) +#define SYS_CFG_IC_MACPHY_MODE BIT(11) +#define SYS_CFG_CHIP_VER (BIT(12) | BIT(13) | BIT(14) | BIT(15)) +#define SYS_CFG_BT_FUNC BIT(16) +#define SYS_CFG_VENDOR_ID BIT(19) +#define SYS_CFG_PAD_HWPD_IDN BIT(22) +#define SYS_CFG_TRP_VAUX_EN BIT(23) +#define SYS_CFG_TRP_BT_EN BIT(24) +#define SYS_CFG_BD_PKG_SEL BIT(25) +#define SYS_CFG_BD_HCI_SEL BIT(26) +#define SYS_CFG_TYPE_ID BIT(27) +#define SYS_CFG_RTL_ID BIT(23) /* TestChip ID, + 1:Test(RLE); 0:MP(RL) */ +#define SYS_CFG_SPS_SEL BIT(24) /* 1:LDO regulator mode; + 0:Switching regulator mode*/ +#define 
SYS_CFG_CHIP_VERSION_MASK 0xf000 /* Bit 12 - 15 */ +#define SYS_CFG_CHIP_VERSION_SHIFT 12 + +#define REG_GPIO_OUTSTS 0x00f4 /* For RTL8723 only. */ +#define GPIO_EFS_HCI_SEL (BIT(0) | BIT(1)) +#define GPIO_PAD_HCI_SEL (BIT(2) | BIT(3)) +#define GPIO_HCI_SEL (BIT(4) | BIT(5)) +#define GPIO_PKG_SEL_HCI BIT(6) +#define GPIO_FEN_GPS BIT(7) +#define GPIO_FEN_BT BIT(8) +#define GPIO_FEN_WL BIT(9) +#define GPIO_FEN_PCI BIT(10) +#define GPIO_FEN_USB BIT(11) +#define GPIO_BTRF_HWPDN_N BIT(12) +#define GPIO_WLRF_HWPDN_N BIT(13) +#define GPIO_PDN_BT_N BIT(14) +#define GPIO_PDN_GPS_N BIT(15) +#define GPIO_BT_CTL_HWPDN BIT(16) +#define GPIO_GPS_CTL_HWPDN BIT(17) +#define GPIO_PPHY_SUSB BIT(20) +#define GPIO_UPHY_SUSB BIT(21) +#define GPIO_PCI_SUSEN BIT(22) +#define GPIO_USB_SUSEN BIT(23) +#define GPIO_RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28)) + +/* 0x0100 ~ 0x01FF MACTOP General Configuration */ +#define REG_CR 0x0100 +#define CR_HCI_TXDMA_ENABLE BIT(0) +#define CR_HCI_RXDMA_ENABLE BIT(1) +#define CR_TXDMA_ENABLE BIT(2) +#define CR_RXDMA_ENABLE BIT(3) +#define CR_PROTOCOL_ENABLE BIT(4) +#define CR_SCHEDULE_ENABLE BIT(5) +#define CR_MAC_TX_ENABLE BIT(6) +#define CR_MAC_RX_ENABLE BIT(7) +#define CR_SW_BEACON_ENABLE BIT(8) +#define CR_SECURITY_ENABLE BIT(9) +#define CR_CALTIMER_ENABLE BIT(10) + +/* Media Status Register */ +#define REG_MSR 0x0102 +#define MSR_LINKTYPE_MASK 0x3 +#define MSR_LINKTYPE_NONE 0x0 +#define MSR_LINKTYPE_ADHOC 0x1 +#define MSR_LINKTYPE_STATION 0x2 +#define MSR_LINKTYPE_AP 0x3 + +#define REG_PBP 0x0104 +#define PBP_PAGE_SIZE_RX_SHIFT 0 +#define PBP_PAGE_SIZE_TX_SHIFT 4 +#define PBP_PAGE_SIZE_64 0x0 +#define PBP_PAGE_SIZE_128 0x1 +#define PBP_PAGE_SIZE_256 0x2 +#define PBP_PAGE_SIZE_512 0x3 +#define PBP_PAGE_SIZE_1024 0x4 + +#define REG_TRXDMA_CTRL 0x010c +#define TRXDMA_CTRL_VOQ_SHIFT 4 +#define TRXDMA_CTRL_VIQ_SHIFT 6 +#define TRXDMA_CTRL_BEQ_SHIFT 8 +#define TRXDMA_CTRL_BKQ_SHIFT 10 +#define TRXDMA_CTRL_MGQ_SHIFT 12 +#define TRXDMA_CTRL_HIQ_SHIFT 14 +#define TRXDMA_QUEUE_LOW 1 +#define TRXDMA_QUEUE_NORMAL 2 +#define TRXDMA_QUEUE_HIGH 3 + +#define REG_TRXFF_BNDY 0x0114 +#define REG_TRXFF_STATUS 0x0118 +#define REG_RXFF_PTR 0x011c +#define REG_HIMR 0x0120 +#define REG_HISR 0x0124 +#define REG_HIMRE 0x0128 +#define REG_HISRE 0x012c +#define REG_CPWM 0x012f +#define REG_FWIMR 0x0130 +#define REG_FWISR 0x0134 +#define REG_PKTBUF_DBG_CTRL 0x0140 +#define REG_PKTBUF_DBG_DATA_L 0x0144 +#define REG_PKTBUF_DBG_DATA_H 0x0148 + +#define REG_TC0_CTRL 0x0150 +#define REG_TC1_CTRL 0x0154 +#define REG_TC2_CTRL 0x0158 +#define REG_TC3_CTRL 0x015c +#define REG_TC4_CTRL 0x0160 +#define REG_TCUNIT_BASE 0x0164 +#define REG_MBIST_START 0x0174 +#define REG_MBIST_DONE 0x0178 +#define REG_MBIST_FAIL 0x017c +#define REG_C2HEVT_MSG_NORMAL 0x01a0 +#define REG_C2HEVT_CLEAR 0x01af +#define REG_C2HEVT_MSG_TEST 0x01b8 +#define REG_MCUTST_1 0x01c0 +#define REG_FMTHR 0x01c8 +#define REG_HMTFR 0x01cc +#define REG_HMBOX_0 0x01d0 +#define REG_HMBOX_1 0x01d4 +#define REG_HMBOX_2 0x01d8 +#define REG_HMBOX_3 0x01dc + +#define REG_LLT_INIT 0x01e0 +#define LLT_OP_INACTIVE 0x0 +#define LLT_OP_WRITE (0x1 << 30) +#define LLT_OP_READ (0x2 << 30) +#define LLT_OP_MASK (0x3 << 30) + +#define REG_BB_ACCEESS_CTRL 0x01e8 +#define REG_BB_ACCESS_DATA 0x01ec + +/* 0x0200 ~ 0x027F TXDMA Configuration */ +#define REG_RQPN 0x0200 +#define RQPN_HI_PQ_SHIFT 0 +#define RQPN_LO_PQ_SHIFT 8 +#define RQPN_NORM_PQ_SHIFT 16 +#define RQPN_LOAD BIT(31) + +#define REG_FIFOPAGE 0x0204 +#define REG_TDECTRL 0x0208 +#define REG_TXDMA_OFFSET_CHK 
0x020c +#define REG_TXDMA_STATUS 0x0210 +#define REG_RQPN_NPQ 0x0214 + +/* 0x0280 ~ 0x02FF RXDMA Configuration */ +#define REG_RXDMA_AGG_PG_TH 0x0280 +#define REG_RXPKT_NUM 0x0284 +#define REG_RXDMA_STATUS 0x0288 + +#define REG_RF_BB_CMD_ADDR 0x02c0 +#define REG_RF_BB_CMD_DATA 0x02c4 + +/* spec version 11 */ +/* 0x0400 ~ 0x047F Protocol Configuration */ +#define REG_VOQ_INFORMATION 0x0400 +#define REG_VIQ_INFORMATION 0x0404 +#define REG_BEQ_INFORMATION 0x0408 +#define REG_BKQ_INFORMATION 0x040c +#define REG_MGQ_INFORMATION 0x0410 +#define REG_HGQ_INFORMATION 0x0414 +#define REG_BCNQ_INFORMATION 0x0418 + +#define REG_CPU_MGQ_INFORMATION 0x041c +#define REG_FWHW_TXQ_CTRL 0x0420 +#define FWHW_TXQ_CTRL_AMPDU_RETRY BIT(7) +#define FWHW_TXQ_CTRL_XMIT_MGMT_ACK BIT(12) + +#define REG_HWSEQ_CTRL 0x0423 +#define REG_TXPKTBUF_BCNQ_BDNY 0x0424 +#define REG_TXPKTBUF_MGQ_BDNY 0x0425 +#define REG_LIFETIME_EN 0x0426 +#define REG_MULTI_BCNQ_OFFSET 0x0427 + +#define REG_SPEC_SIFS 0x0428 +#define SPEC_SIFS_CCK_MASK 0x00ff +#define SPEC_SIFS_CCK_SHIFT 0 +#define SPEC_SIFS_OFDM_MASK 0xff00 +#define SPEC_SIFS_OFDM_SHIFT 8 + +#define REG_RETRY_LIMIT 0x042a +#define RETRY_LIMIT_LONG_SHIFT 0 +#define RETRY_LIMIT_LONG_MASK 0x003f +#define RETRY_LIMIT_SHORT_SHIFT 8 +#define RETRY_LIMIT_SHORT_MASK 0x3f00 + +#define REG_DARFRC 0x0430 +#define REG_RARFRC 0x0438 +#define REG_RESPONSE_RATE_SET 0x0440 +#define RESPONSE_RATE_BITMAP_ALL 0xfffff +#define RESPONSE_RATE_RRSR_CCK_ONLY_1M 0xffff1 +#define RSR_1M BIT(0) +#define RSR_2M BIT(1) +#define RSR_5_5M BIT(2) +#define RSR_11M BIT(3) +#define RSR_6M BIT(4) +#define RSR_9M BIT(5) +#define RSR_12M BIT(6) +#define RSR_18M BIT(7) +#define RSR_24M BIT(8) +#define RSR_36M BIT(9) +#define RSR_48M BIT(10) +#define RSR_54M BIT(11) +#define RSR_MCS0 BIT(12) +#define RSR_MCS1 BIT(13) +#define RSR_MCS2 BIT(14) +#define RSR_MCS3 BIT(15) +#define RSR_MCS4 BIT(16) +#define RSR_MCS5 BIT(17) +#define RSR_MCS6 BIT(18) +#define RSR_MCS7 BIT(19) +#define RSR_RSC_LOWER_SUB_CHANNEL BIT(21) /* 0x200000 */ +#define RSR_RSC_UPPER_SUB_CHANNEL BIT(22) /* 0x400000 */ +#define RSR_RSC_BANDWIDTH_40M (RSR_RSC_UPPER_SUB_CHANNEL | \ + RSR_RSC_LOWER_SUB_CHANNEL) +#define RSR_ACK_SHORT_PREAMBLE BIT(23) + +#define REG_ARFR0 0x0444 +#define REG_ARFR1 0x0448 +#define REG_ARFR2 0x044c +#define REG_ARFR3 0x0450 +#define REG_AGGLEN_LMT 0x0458 +#define REG_AMPDU_MIN_SPACE 0x045c +#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045d +#define REG_FAST_EDCA_CTRL 0x0460 +#define REG_RD_RESP_PKT_TH 0x0463 +#define REG_INIRTS_RATE_SEL 0x0480 +#define REG_INIDATA_RATE_SEL 0x0484 + +#define REG_POWER_STATUS 0x04a4 +#define REG_POWER_STAGE1 0x04b4 +#define REG_POWER_STAGE2 0x04b8 +#define REG_PKT_VO_VI_LIFE_TIME 0x04c0 +#define REG_PKT_BE_BK_LIFE_TIME 0x04c2 +#define REG_STBC_SETTING 0x04c4 +#define REG_PROT_MODE_CTRL 0x04c8 +#define REG_MAX_AGGR_NUM 0x04ca +#define REG_RTS_MAX_AGGR_NUM 0x04cb +#define REG_BAR_MODE_CTRL 0x04cc +#define REG_RA_TRY_RATE_AGG_LMT 0x04cf +#define REG_NQOS_SEQ 0x04dc +#define REG_QOS_SEQ 0x04de +#define REG_NEED_CPU_HANDLE 0x04e0 +#define REG_PKT_LOSE_RPT 0x04e1 +#define REG_PTCL_ERR_STATUS 0x04e2 +#define REG_DUMMY 0x04fc + +/* 0x0500 ~ 0x05FF EDCA Configuration */ +#define REG_EDCA_VO_PARAM 0x0500 +#define REG_EDCA_VI_PARAM 0x0504 +#define REG_EDCA_BE_PARAM 0x0508 +#define REG_EDCA_BK_PARAM 0x050c +#define EDCA_PARAM_ECW_MIN_SHIFT 8 +#define EDCA_PARAM_ECW_MAX_SHIFT 12 +#define EDCA_PARAM_TXOP_SHIFT 16 +#define REG_BEACON_TCFG 0x0510 +#define REG_PIFS 0x0512 +#define REG_RDG_PIFS 0x0513 +#define 
REG_SIFS_CCK 0x0514 +#define REG_SIFS_OFDM 0x0516 +#define REG_TSFTR_SYN_OFFSET 0x0518 +#define REG_AGGR_BREAK_TIME 0x051a +#define REG_SLOT 0x051b +#define REG_TX_PTCL_CTRL 0x0520 +#define REG_TXPAUSE 0x0522 +#define REG_DIS_TXREQ_CLR 0x0523 +#define REG_RD_CTRL 0x0524 +#define REG_TBTT_PROHIBIT 0x0540 +#define REG_RD_NAV_NXT 0x0544 +#define REG_NAV_PROT_LEN 0x0546 + +#define REG_BEACON_CTRL 0x0550 +#define REG_BEACON_CTRL_1 0x0551 +#define BEACON_ATIM BIT(0) +#define BEACON_CTRL_MBSSID BIT(1) +#define BEACON_CTRL_TX_BEACON_RPT BIT(2) +#define BEACON_FUNCTION_ENABLE BIT(3) +#define BEACON_DISABLE_TSF_UPDATE BIT(4) + +#define REG_MBID_NUM 0x0552 +#define REG_DUAL_TSF_RST 0x0553 +#define DUAL_TSF_RESET_TSF0 BIT(0) +#define DUAL_TSF_RESET_TSF1 BIT(1) +#define DUAL_TSF_RESET_P2P BIT(4) +#define DUAL_TSF_TX_OK BIT(5) + +/* The same as REG_MBSSID_BCN_SPACE */ +#define REG_BCN_INTERVAL 0x0554 +#define REG_MBSSID_BCN_SPACE 0x0554 + +#define REG_DRIVER_EARLY_INT 0x0558 +#define DRIVER_EARLY_INT_TIME 5 + +#define REG_BEACON_DMA_TIME 0x0559 +#define BEACON_DMA_ATIME_INT_TIME 2 + +#define REG_ATIMWND 0x055a +#define REG_BCN_MAX_ERR 0x055d +#define REG_RXTSF_OFFSET_CCK 0x055e +#define REG_RXTSF_OFFSET_OFDM 0x055f +#define REG_TSFTR 0x0560 +#define REG_TSFTR1 0x0568 +#define REG_INIT_TSFTR 0x0564 +#define REG_ATIMWND_1 0x0570 +#define REG_PSTIMER 0x0580 +#define REG_TIMER0 0x0584 +#define REG_TIMER1 0x0588 +#define REG_ACM_HW_CTRL 0x05c0 +#define ACM_HW_CTRL_BK BIT(0) +#define ACM_HW_CTRL_BE BIT(1) +#define ACM_HW_CTRL_VI BIT(2) +#define ACM_HW_CTRL_VO BIT(3) +#define REG_ACM_RST_CTRL 0x05c1 +#define REG_ACMAVG 0x05c2 +#define REG_VO_ADMTIME 0x05c4 +#define REG_VI_ADMTIME 0x05c6 +#define REG_BE_ADMTIME 0x05c8 +#define REG_EDCA_RANDOM_GEN 0x05cc +#define REG_SCH_TXCMD 0x05d0 + +/* define REG_FW_TSF_SYNC_CNT 0x04a0 */ +#define REG_FW_RESET_TSF_CNT_1 0x05fc +#define REG_FW_RESET_TSF_CNT_0 0x05fd +#define REG_FW_BCN_DIS_CNT 0x05fe + +/* 0x0600 ~ 0x07FF WMAC Configuration */ +#define REG_APSD_CTRL 0x0600 +#define APSD_CTRL_OFF BIT(6) +#define APSD_CTRL_OFF_STATUS BIT(7) +#define REG_BW_OPMODE 0x0603 +#define BW_OPMODE_20MHZ BIT(2) +#define BW_OPMODE_5G BIT(1) +#define BW_OPMODE_11J BIT(0) + +#define REG_TCR 0x0604 + +/* Receive Configuration Register */ +#define REG_RCR 0x0608 +#define RCR_ACCEPT_AP BIT(0) /* Accept all unicast packet */ +#define RCR_ACCEPT_PHYS_MATCH BIT(1) /* Accept phys match packet */ +#define RCR_ACCEPT_MCAST BIT(2) +#define RCR_ACCEPT_BCAST BIT(3) +#define RCR_ACCEPT_ADDR3 BIT(4) /* Accept address 3 match + packet */ +#define RCR_ACCEPT_PM BIT(5) /* Accept power management + packet */ +#define RCR_CHECK_BSSID_MATCH BIT(6) /* Accept BSSID match packet */ +#define RCR_CHECK_BSSID_BEACON BIT(7) /* Accept BSSID match packet + (Rx beacon, probe rsp) */ +#define RCR_ACCEPT_CRC32 BIT(8) /* Accept CRC32 error packet */ +#define RCR_ACCEPT_ICV BIT(9) /* Accept ICV error packet */ +#define RCR_ACCEPT_DATA_FRAME BIT(11) +#define RCR_ACCEPT_CTRL_FRAME BIT(12) +#define RCR_ACCEPT_MGMT_FRAME BIT(13) +#define RCR_HTC_LOC_CTRL BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */ +#define RCR_MFBEN BIT(22) +#define RCR_LSIGEN BIT(23) +#define RCR_MULTI_BSSID_ENABLE BIT(24) /* Enable Multiple BssId */ +#define RCR_ACCEPT_BA_SSN BIT(27) /* Accept BA SSN */ +#define RCR_APPEND_PHYSTAT BIT(28) +#define RCR_APPEND_ICV BIT(29) +#define RCR_APPEND_MIC BIT(30) +#define RCR_APPEND_FCS BIT(31) /* WMAC append FCS after */ + +#define REG_RX_PKT_LIMIT 0x060c +#define REG_RX_DLK_TIME 0x060d +#define REG_RX_DRVINFO_SZ 0x060f + 
+#define REG_MACID 0x0610 +#define REG_BSSID 0x0618 +#define REG_MAR 0x0620 +#define REG_MBIDCAMCFG 0x0628 + +#define REG_USTIME_EDCA 0x0638 +#define REG_MAC_SPEC_SIFS 0x063a + +/* 20100719 Joseph: Hardware register definition change. (HW datasheet v54) */ + /* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */ +#define REG_R2T_SIFS 0x063c + /* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */ +#define REG_T2T_SIFS 0x063e +#define REG_ACKTO 0x0640 +#define REG_CTS2TO 0x0641 +#define REG_EIFS 0x0642 + +/* WMA, BA, CCX */ +#define REG_NAV_CTRL 0x0650 +/* In units of 128us */ +#define REG_NAV_UPPER 0x0652 +#define NAV_UPPER_UNIT 128 + +#define REG_BACAMCMD 0x0654 +#define REG_BACAMCONTENT 0x0658 +#define REG_LBDLY 0x0660 +#define REG_FWDLY 0x0661 +#define REG_RXERR_RPT 0x0664 +#define REG_WMAC_TRXPTCL_CTL 0x0668 + +/* Security */ +#define REG_CAM_CMD 0x0670 +#define CAM_CMD_POLLING BIT(31) +#define CAM_CMD_WRITE BIT(16) +#define CAM_CMD_KEY_SHIFT 3 +#define REG_CAM_WRITE 0x0674 +#define CAM_WRITE_VALID BIT(15) +#define REG_CAM_READ 0x0678 +#define REG_CAM_DEBUG 0x067c +#define REG_SECURITY_CFG 0x0680 +#define SEC_CFG_TX_USE_DEFKEY BIT(0) +#define SEC_CFG_RX_USE_DEFKEY BIT(1) +#define SEC_CFG_TX_SEC_ENABLE BIT(2) +#define SEC_CFG_RX_SEC_ENABLE BIT(3) +#define SEC_CFG_SKBYA2 BIT(4) +#define SEC_CFG_NO_SKMC BIT(5) +#define SEC_CFG_TXBC_USE_DEFKEY BIT(6) +#define SEC_CFG_RXBC_USE_DEFKEY BIT(7) + +/* Power */ +#define REG_WOW_CTRL 0x0690 +#define REG_PSSTATUS 0x0691 +#define REG_PS_RX_INFO 0x0692 +#define REG_LPNAV_CTRL 0x0694 +#define REG_WKFMCAM_CMD 0x0698 +#define REG_WKFMCAM_RWD 0x069c +#define REG_RXFLTMAP0 0x06a0 +#define REG_RXFLTMAP1 0x06a2 +#define REG_RXFLTMAP2 0x06a4 +#define REG_BCN_PSR_RPT 0x06a8 +#define REG_CALB32K_CTRL 0x06ac +#define REG_PKT_MON_CTRL 0x06b4 +#define REG_BT_COEX_TABLE 0x06c0 +#define REG_WMAC_RESP_TXINFO 0x06d8 + +#define REG_MACID1 0x0700 +#define REG_BSSID1 0x0708 + +#define REG_FPGA0_RF_MODE 0x0800 +#define FPGA_RF_MODE BIT(0) +#define FPGA_RF_MODE_JAPAN BIT(1) +#define FPGA_RF_MODE_CCK BIT(24) +#define FPGA_RF_MODE_OFDM BIT(25) + +#define REG_FPGA0_TX_INFO 0x0804 +#define REG_FPGA0_PSD_FUNC 0x0808 +#define REG_FPGA0_TX_GAIN 0x080c +#define REG_FPGA0_RF_TIMING1 0x0810 +#define REG_FPGA0_RF_TIMING2 0x0814 +#define REG_FPGA0_POWER_SAVE 0x0818 +#define FPGA0_PS_LOWER_CHANNEL BIT(26) +#define FPGA0_PS_UPPER_CHANNEL BIT(27) + +#define REG_FPGA0_XA_HSSI_PARM1 0x0820 /* RF 3 wire register */ +#define FPGA0_HSSI_PARM1_PI BIT(8) +#define REG_FPGA0_XA_HSSI_PARM2 0x0824 +#define REG_FPGA0_XB_HSSI_PARM1 0x0828 +#define REG_FPGA0_XB_HSSI_PARM2 0x082c +#define FPGA0_HSSI_3WIRE_DATA_LEN 0x800 +#define FPGA0_HSSI_3WIRE_ADDR_LEN 0x400 +#define FPGA0_HSSI_PARM2_ADDR_SHIFT 23 +#define FPGA0_HSSI_PARM2_ADDR_MASK 0x7f800000 /* 0xff << 23 */ +#define FPGA0_HSSI_PARM2_CCK_HIGH_PWR BIT(9) +#define FPGA0_HSSI_PARM2_EDGE_READ BIT(31) + +#define REG_TX_AGC_B_RATE18_06 0x0830 +#define REG_TX_AGC_B_RATE54_24 0x0834 +#define REG_TX_AGC_B_CCK1_55_MCS32 0x0838 +#define REG_TX_AGC_B_MCS03_MCS00 0x083c + +#define REG_FPGA0_XA_LSSI_PARM 0x0840 +#define REG_FPGA0_XB_LSSI_PARM 0x0844 +#define FPGA0_LSSI_PARM_ADDR_SHIFT 20 +#define FPGA0_LSSI_PARM_ADDR_MASK 0x0ff00000 +#define FPGA0_LSSI_PARM_DATA_MASK 0x000fffff + +#define REG_TX_AGC_B_MCS07_MCS04 0x0848 +#define REG_TX_AGC_B_MCS11_MCS08 0x084c + +#define REG_FPGA0_XCD_SWITCH_CTRL 0x085c + +#define REG_FPGA0_XA_RF_INT_OE 0x0860 /* RF Channel switch */ +#define REG_FPGA0_XB_RF_INT_OE 0x0864 +#define FPGA0_INT_OE_ANTENNA_AB_OPEN 0x000 +#define FPGA0_INT_OE_ANTENNA_A 
BIT(8) +#define FPGA0_INT_OE_ANTENNA_B BIT(9) +#define FPGA0_INT_OE_ANTENNA_MASK (FPGA0_INT_OE_ANTENNA_A | \ + FPGA0_INT_OE_ANTENNA_B) + +#define REG_TX_AGC_B_MCS15_MCS12 0x0868 +#define REG_TX_AGC_B_CCK11_A_CCK2_11 0x086c + +#define REG_FPGA0_XAB_RF_SW_CTRL 0x0870 +#define REG_FPGA0_XA_RF_SW_CTRL 0x0870 /* 16 bit */ +#define REG_FPGA0_XB_RF_SW_CTRL 0x0872 /* 16 bit */ +#define REG_FPGA0_XCD_RF_SW_CTRL 0x0874 +#define REG_FPGA0_XC_RF_SW_CTRL 0x0874 /* 16 bit */ +#define REG_FPGA0_XD_RF_SW_CTRL 0x0876 /* 16 bit */ +#define FPGA0_RF_3WIRE_DATA BIT(0) +#define FPGA0_RF_3WIRE_CLOC BIT(1) +#define FPGA0_RF_3WIRE_LOAD BIT(2) +#define FPGA0_RF_3WIRE_RW BIT(3) +#define FPGA0_RF_3WIRE_MASK 0xf +#define FPGA0_RF_RFENV BIT(4) +#define FPGA0_RF_TRSW BIT(5) /* Useless now */ +#define FPGA0_RF_TRSWB BIT(6) +#define FPGA0_RF_ANTSW BIT(8) +#define FPGA0_RF_ANTSWB BIT(9) +#define FPGA0_RF_PAPE BIT(10) +#define FPGA0_RF_PAPE5G BIT(11) +#define FPGA0_RF_BD_CTRL_SHIFT 16 + +#define REG_FPGA0_XAB_RF_PARM 0x0878 /* Antenna select path in ODM */ +#define REG_FPGA0_XA_RF_PARM 0x0878 /* 16 bit */ +#define REG_FPGA0_XB_RF_PARM 0x087a /* 16 bit */ +#define REG_FPGA0_XCD_RF_PARM 0x087c +#define REG_FPGA0_XC_RF_PARM 0x087c /* 16 bit */ +#define REG_FPGA0_XD_RF_PARM 0x087e /* 16 bit */ +#define FPGA0_RF_PARM_RFA_ENABLE BIT(1) +#define FPGA0_RF_PARM_RFB_ENABLE BIT(17) +#define FPGA0_RF_PARM_CLK_GATE BIT(31) + +#define REG_FPGA0_ANALOG1 0x0880 +#define REG_FPGA0_ANALOG2 0x0884 +#define FPGA0_ANALOG2_20MHZ BIT(10) +#define REG_FPGA0_ANALOG3 0x0888 +#define REG_FPGA0_ANALOG4 0x088c + +#define REG_FPGA0_XA_LSSI_READBACK 0x08a0 /* Tranceiver LSSI Readback */ +#define REG_FPGA0_XB_LSSI_READBACK 0x08a4 +#define REG_HSPI_XA_READBACK 0x08b8 /* Transceiver A HSPI read */ +#define REG_HSPI_XB_READBACK 0x08bc /* Transceiver B HSPI read */ + +#define REG_FPGA1_RF_MODE 0x0900 + +#define REG_FPGA1_TX_INFO 0x090c + +#define REG_CCK0_SYSTEM 0x0a00 +#define CCK0_SIDEBAND BIT(4) + +#define REG_CCK0_AFE_SETTING 0x0a04 + +#define REG_CONFIG_ANT_A 0x0b68 +#define REG_CONFIG_ANT_B 0x0b6c + +#define REG_OFDM0_TRX_PATH_ENABLE 0x0c04 +#define OFDM_RF_PATH_RX_MASK 0x0f +#define OFDM_RF_PATH_RX_A BIT(0) +#define OFDM_RF_PATH_RX_B BIT(1) +#define OFDM_RF_PATH_RX_C BIT(2) +#define OFDM_RF_PATH_RX_D BIT(3) +#define OFDM_RF_PATH_TX_MASK 0xf0 +#define OFDM_RF_PATH_TX_A BIT(4) +#define OFDM_RF_PATH_TX_B BIT(5) +#define OFDM_RF_PATH_TX_C BIT(6) +#define OFDM_RF_PATH_TX_D BIT(7) + +#define REG_OFDM0_TR_MUX_PAR 0x0c08 + +#define REG_OFDM0_XA_RX_IQ_IMBALANCE 0x0c14 +#define REG_OFDM0_XB_RX_IQ_IMBALANCE 0x0c1c + +#define REG_OFDM0_ENERGY_CCA_THRES 0x0c4c + +#define REG_OFDM0_XA_AGC_CORE1 0x0c50 +#define REG_OFDM0_XA_AGC_CORE2 0x0c54 +#define REG_OFDM0_XB_AGC_CORE1 0x0c58 +#define REG_OFDM0_XB_AGC_CORE2 0x0c5c +#define REG_OFDM0_XC_AGC_CORE1 0x0c60 +#define REG_OFDM0_XC_AGC_CORE2 0x0c64 +#define REG_OFDM0_XD_AGC_CORE1 0x0c68 +#define REG_OFDM0_XD_AGC_CORE2 0x0c6c +#define OFDM0_X_AGC_CORE1_IGI_MASK 0x0000007F + +#define REG_OFDM0_AGC_PARM1 0x0c70 + +#define REG_OFDM0_AGCR_SSI_TABLE 0x0c78 + +#define REG_OFDM0_XA_TX_IQ_IMBALANCE 0x0c80 +#define REG_OFDM0_XB_TX_IQ_IMBALANCE 0x0c88 +#define REG_OFDM0_XC_TX_IQ_IMBALANCE 0x0c90 +#define REG_OFDM0_XD_TX_IQ_IMBALANCE 0x0c98 + +#define REG_OFDM0_XC_TX_AFE 0x0c94 +#define REG_OFDM0_XD_TX_AFE 0x0c9c + +#define REG_OFDM0_RX_IQ_EXT_ANTA 0x0ca0 + +#define REG_OFDM1_LSTF 0x0d00 +#define OFDM_LSTF_PRIME_CH_LOW BIT(10) +#define OFDM_LSTF_PRIME_CH_HIGH BIT(11) +#define OFDM_LSTF_PRIME_CH_MASK (OFDM_LSTF_PRIME_CH_LOW | \ + 
OFDM_LSTF_PRIME_CH_HIGH) +#define OFDM_LSTF_CONTINUE_TX BIT(28) +#define OFDM_LSTF_SINGLE_CARRIER BIT(29) +#define OFDM_LSTF_SINGLE_TONE BIT(30) +#define OFDM_LSTF_MASK 0x70000000 + +#define REG_OFDM1_TRX_PATH_ENABLE 0x0d04 + +#define REG_TX_AGC_A_RATE18_06 0x0e00 +#define REG_TX_AGC_A_RATE54_24 0x0e04 +#define REG_TX_AGC_A_CCK1_MCS32 0x0e08 +#define REG_TX_AGC_A_MCS03_MCS00 0x0e10 +#define REG_TX_AGC_A_MCS07_MCS04 0x0e14 +#define REG_TX_AGC_A_MCS11_MCS08 0x0e18 +#define REG_TX_AGC_A_MCS15_MCS12 0x0e1c + +#define REG_FPGA0_IQK 0x0e28 + +#define REG_TX_IQK_TONE_A 0x0e30 +#define REG_RX_IQK_TONE_A 0x0e34 +#define REG_TX_IQK_PI_A 0x0e38 +#define REG_RX_IQK_PI_A 0x0e3c + +#define REG_TX_IQK 0x0e40 +#define REG_RX_IQK 0x0e44 +#define REG_IQK_AGC_PTS 0x0e48 +#define REG_IQK_AGC_RSP 0x0e4c +#define REG_TX_IQK_TONE_B 0x0e50 +#define REG_RX_IQK_TONE_B 0x0e54 +#define REG_TX_IQK_PI_B 0x0e58 +#define REG_RX_IQK_PI_B 0x0e5c +#define REG_IQK_AGC_CONT 0x0e60 + +#define REG_BLUETOOTH 0x0e6c +#define REG_RX_WAIT_CCA 0x0e70 +#define REG_TX_CCK_RFON 0x0e74 +#define REG_TX_CCK_BBON 0x0e78 +#define REG_TX_OFDM_RFON 0x0e7c +#define REG_TX_OFDM_BBON 0x0e80 +#define REG_TX_TO_RX 0x0e84 +#define REG_TX_TO_TX 0x0e88 +#define REG_RX_CCK 0x0e8c + +#define REG_TX_POWER_BEFORE_IQK_A 0x0e94 +#define REG_TX_POWER_AFTER_IQK_A 0x0e9c + +#define REG_RX_POWER_BEFORE_IQK_A 0x0ea0 +#define REG_RX_POWER_BEFORE_IQK_A_2 0x0ea4 +#define REG_RX_POWER_AFTER_IQK_A 0x0ea8 +#define REG_RX_POWER_AFTER_IQK_A_2 0x0eac + +#define REG_TX_POWER_BEFORE_IQK_B 0x0eb4 +#define REG_TX_POWER_AFTER_IQK_B 0x0ebc + +#define REG_RX_POWER_BEFORE_IQK_B 0x0ec0 +#define REG_RX_POWER_BEFORE_IQK_B_2 0x0ec4 +#define REG_RX_POWER_AFTER_IQK_B 0x0ec8 +#define REG_RX_POWER_AFTER_IQK_B_2 0x0ecc + +#define REG_RX_OFDM 0x0ed0 +#define REG_RX_WAIT_RIFS 0x0ed4 +#define REG_RX_TO_RX 0x0ed8 +#define REG_STANDBY 0x0edc +#define REG_SLEEP 0x0ee0 +#define REG_PMPD_ANAEN 0x0eec + +#define REG_FW_START_ADDRESS 0x1000 + +#define REG_USB_INFO 0xfe17 +#define REG_USB_HIMR 0xfe38 +#define USB_HIMR_TIMEOUT2 BIT(31) +#define USB_HIMR_TIMEOUT1 BIT(30) +#define USB_HIMR_PSTIMEOUT BIT(29) +#define USB_HIMR_GTINT4 BIT(28) +#define USB_HIMR_GTINT3 BIT(27) +#define USB_HIMR_TXBCNERR BIT(26) +#define USB_HIMR_TXBCNOK BIT(25) +#define USB_HIMR_TSF_BIT32_TOGGLE BIT(24) +#define USB_HIMR_BCNDMAINT3 BIT(23) +#define USB_HIMR_BCNDMAINT2 BIT(22) +#define USB_HIMR_BCNDMAINT1 BIT(21) +#define USB_HIMR_BCNDMAINT0 BIT(20) +#define USB_HIMR_BCNDOK3 BIT(19) +#define USB_HIMR_BCNDOK2 BIT(18) +#define USB_HIMR_BCNDOK1 BIT(17) +#define USB_HIMR_BCNDOK0 BIT(16) +#define USB_HIMR_HSISR_IND BIT(15) +#define USB_HIMR_BCNDMAINT_E BIT(14) +/* RSVD BIT(13) */ +#define USB_HIMR_CTW_END BIT(12) +/* RSVD BIT(11) */ +#define USB_HIMR_C2HCMD BIT(10) +#define USB_HIMR_CPWM2 BIT(9) +#define USB_HIMR_CPWM BIT(8) +#define USB_HIMR_HIGHDOK BIT(7) /* High Queue DMA OK + Interrupt */ +#define USB_HIMR_MGNTDOK BIT(6) /* Management Queue DMA OK + Interrupt */ +#define USB_HIMR_BKDOK BIT(5) /* AC_BK DMA OK Interrupt */ +#define USB_HIMR_BEDOK BIT(4) /* AC_BE DMA OK Interrupt */ +#define USB_HIMR_VIDOK BIT(3) /* AC_VI DMA OK Interrupt */ +#define USB_HIMR_VODOK BIT(2) /* AC_VO DMA Interrupt */ +#define USB_HIMR_RDU BIT(1) /* Receive Descriptor + Unavailable */ +#define USB_HIMR_ROK BIT(0) /* Receive DMA OK Interrupt */ + +#define REG_USB_SPECIAL_OPTION 0xfe55 +#define REG_USB_DMA_AGG_TO 0xfe5b +#define REG_USB_AGG_TO 0xfe5c +#define REG_USB_AGG_TH 0xfe5d + +#define REG_NORMAL_SIE_VID 0xfe60 /* 0xfe60 - 0xfe61 */ +#define 
REG_NORMAL_SIE_PID 0xfe62 /* 0xfe62 - 0xfe63 */ +#define REG_NORMAL_SIE_OPTIONAL 0xfe64 +#define REG_NORMAL_SIE_EP 0xfe65 /* 0xfe65 - 0xfe67 */ +#define REG_NORMAL_SIE_EP_TX 0xfe66 +#define NORMAL_SIE_EP_TX_HIGH_MASK 0x000f +#define NORMAL_SIE_EP_TX_NORMAL_MASK 0x00f0 +#define NORMAL_SIE_EP_TX_LOW_MASK 0x0f00 + +#define REG_NORMAL_SIE_PHY 0xfe68 /* 0xfe68 - 0xfe6b */ +#define REG_NORMAL_SIE_OPTIONAL2 0xfe6c +#define REG_NORMAL_SIE_GPS_EP 0xfe6d /* RTL8723 only */ +#define REG_NORMAL_SIE_MAC_ADDR 0xfe70 /* 0xfe70 - 0xfe75 */ +#define REG_NORMAL_SIE_STRING 0xfe80 /* 0xfe80 - 0xfedf */ + +/* RF6052 registers */ +#define RF6052_REG_AC 0x00 +#define RF6052_REG_IQADJ_G1 0x01 +#define RF6052_REG_IQADJ_G2 0x02 +#define RF6052_REG_BS_PA_APSET_G1_G4 0x03 +#define RF6052_REG_BS_PA_APSET_G5_G8 0x04 +#define RF6052_REG_POW_TRSW 0x05 +#define RF6052_REG_GAIN_RX 0x06 +#define RF6052_REG_GAIN_TX 0x07 +#define RF6052_REG_TXM_IDAC 0x08 +#define RF6052_REG_IPA_G 0x09 +#define RF6052_REG_TXBIAS_G 0x0a +#define RF6052_REG_TXPA_AG 0x0b +#define RF6052_REG_IPA_A 0x0c +#define RF6052_REG_TXBIAS_A 0x0d +#define RF6052_REG_BS_PA_APSET_G9_G11 0x0e +#define RF6052_REG_BS_IQGEN 0x0f +#define RF6052_REG_MODE1 0x10 +#define RF6052_REG_MODE2 0x11 +#define RF6052_REG_RX_AGC_HP 0x12 +#define RF6052_REG_TX_AGC 0x13 +#define RF6052_REG_BIAS 0x14 +#define RF6052_REG_IPA 0x15 +#define RF6052_REG_TXBIAS 0x16 +#define RF6052_REG_POW_ABILITY 0x17 +#define RF6052_REG_MODE_AG 0x18 /* RF channel and BW switch */ +#define MODE_AG_CHANNEL_MASK 0x3ff +#define MODE_AG_CHANNEL_20MHZ BIT(10) + +#define RF6052_REG_TOP 0x19 +#define RF6052_REG_RX_G1 0x1a +#define RF6052_REG_RX_G2 0x1b +#define RF6052_REG_RX_BB2 0x1c +#define RF6052_REG_RX_BB1 0x1d +#define RF6052_REG_RCK1 0x1e +#define RF6052_REG_RCK2 0x1f +#define RF6052_REG_TX_G1 0x20 +#define RF6052_REG_TX_G2 0x21 +#define RF6052_REG_TX_G3 0x22 +#define RF6052_REG_TX_BB1 0x23 +#define RF6052_REG_T_METER 0x24 +#define RF6052_REG_SYN_G1 0x25 /* RF TX Power control */ +#define RF6052_REG_SYN_G2 0x26 /* RF TX Power control */ +#define RF6052_REG_SYN_G3 0x27 /* RF TX Power control */ +#define RF6052_REG_SYN_G4 0x28 /* RF TX Power control */ +#define RF6052_REG_SYN_G5 0x29 /* RF TX Power control */ +#define RF6052_REG_SYN_G6 0x2a /* RF TX Power control */ +#define RF6052_REG_SYN_G7 0x2b /* RF TX Power control */ +#define RF6052_REG_SYN_G8 0x2c /* RF TX Power control */ + +#define RF6052_REG_RCK_OS 0x30 /* RF TX PA control */ + +#define RF6052_REG_TXPA_G1 0x31 /* RF TX PA control */ +#define RF6052_REG_TXPA_G2 0x32 /* RF TX PA control */ +#define RF6052_REG_TXPA_G3 0x33 /* RF TX PA control */ diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/realtek/rtlwifi/Kconfig index 73067cac289c..73067cac289c 100644 --- a/drivers/net/wireless/rtlwifi/Kconfig +++ b/drivers/net/wireless/realtek/rtlwifi/Kconfig diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile index ad6d3c52ec57..ad6d3c52ec57 100644 --- a/drivers/net/wireless/rtlwifi/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/Makefile diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 0517a4f2d3f2..0517a4f2d3f2 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 74233d601a90..74233d601a90 100644 --- a/drivers/net/wireless/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h 
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile index 47ceecfcb7dc..47ceecfcb7dc 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h index 39b9a3309cfd..39b9a3309cfd 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c index 53261d6f8578..53261d6f8578 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h index 75e1f7d0db06..75e1f7d0db06 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index c4acd403e5f6..c4acd403e5f6 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h index 75f8094b7a34..75f8094b7a34 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index f2b9d11adc9e..f2b9d11adc9e 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h index 567f354caf95..567f354caf95 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index b72e5377bdbc..b72e5377bdbc 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h index 20e904890fc2..20e904890fc2 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index cf819f02ed23..cf819f02ed23 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h index b4cf1f53d510..b4cf1f53d510 100644 
--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index b2791c893417..b2791c893417 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index 0a903ea179ef..0a903ea179ef 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index b9b0cb7af8ea..b9b0cb7af8ea 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h index ccd5a0f91e3b..ccd5a0f91e3b 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c index 8fe8b4cfae6c..8fe8b4cfae6c 100644 --- a/drivers/net/wireless/rtlwifi/cam.c +++ b/drivers/net/wireless/realtek/rtlwifi/cam.c diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/realtek/rtlwifi/cam.h index e2e647d511c1..e2e647d511c1 100644 --- a/drivers/net/wireless/rtlwifi/cam.h +++ b/drivers/net/wireless/realtek/rtlwifi/cam.h diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index c925a4dff599..c925a4dff599 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h index 782ac2fc4b28..782ac2fc4b28 100644 --- a/drivers/net/wireless/rtlwifi/core.h +++ b/drivers/net/wireless/realtek/rtlwifi/core.h diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c index fd25abad2b9e..fd25abad2b9e 100644 --- a/drivers/net/wireless/rtlwifi/debug.c +++ b/drivers/net/wireless/realtek/rtlwifi/debug.c diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h index fc794b3e9f4a..fc794b3e9f4a 100644 --- a/drivers/net/wireless/rtlwifi/debug.h +++ b/drivers/net/wireless/realtek/rtlwifi/debug.h diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index 0b4082c9272a..0b4082c9272a 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h index be02e7894c61..be02e7894c61 100644 --- a/drivers/net/wireless/rtlwifi/efuse.h +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index f46c9d7f6528..f46c9d7f6528 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h index d4567d12e07e..5da6703942d9 100644 --- a/drivers/net/wireless/rtlwifi/pci.h +++ 
b/drivers/net/wireless/realtek/rtlwifi/pci.h @@ -247,6 +247,8 @@ struct rtl_pci { /* MSI support */ bool msi_support; bool using_msi; + /* interrupt clear before set */ + bool int_clear; }; struct mp_adapter { diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index b69321d45f04..b69321d45f04 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h index 29dfc514212d..29dfc514212d 100644 --- a/drivers/net/wireless/rtlwifi/ps.h +++ b/drivers/net/wireless/realtek/rtlwifi/ps.h diff --git a/drivers/net/wireless/rtlwifi/pwrseqcmd.h b/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h index 17ce0cb2c35c..17ce0cb2c35c 100644 --- a/drivers/net/wireless/rtlwifi/pwrseqcmd.h +++ b/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index 74c14ce28238..74c14ce28238 100644 --- a/drivers/net/wireless/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c diff --git a/drivers/net/wireless/rtlwifi/rc.h b/drivers/net/wireless/realtek/rtlwifi/rc.h index f29643d60d6b..f29643d60d6b 100644 --- a/drivers/net/wireless/rtlwifi/rc.h +++ b/drivers/net/wireless/realtek/rtlwifi/rc.h diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index a62bf0a65c32..a62bf0a65c32 100644 --- a/drivers/net/wireless/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/realtek/rtlwifi/regd.h index f7f15bce35dd..f7f15bce35dd 100644 --- a/drivers/net/wireless/rtlwifi/regd.h +++ b/drivers/net/wireless/realtek/rtlwifi/regd.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile index a85419a37651..a85419a37651 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h index 0532b9852444..0532b9852444 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c index ce4da9d79fbd..ce4da9d79fbd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h index 071ccee69eae..071ccee69eae 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index 629125658b87..629125658b87 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h index 21bd4a5337ab..21bd4a5337ab 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 8ee83b093c0d..8ee83b093c0d 100644 --- 
a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h index 1850fde881b5..1850fde881b5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c index b504bd092fc4..b504bd092fc4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h index 4b325b75faaf..4b325b75faaf 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c index a2bb02c7b837..a2bb02c7b837 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h index b29bd77210f4..b29bd77210f4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c index 02013df968a0..02013df968a0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h index f2d9c6116e5c..f2d9c6116e5c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h index 15400ee6c04b..15400ee6c04b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c index 40893cef7dfe..40893cef7dfe 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h index 0eca030e3238..0eca030e3238 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index 11344121c55e..11344121c55e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h index 22398c3753a6..22398c3753a6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c index 68bcb7fe6a65..68bcb7fe6a65 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c diff --git 
a/drivers/net/wireless/rtlwifi/rtl8188ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h index 403c4ddd236f..403c4ddd236f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 791efbe6b18c..791efbe6b18c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index eab5ae0eb46c..eab5ae0eb46c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile index aee42d7ae8a2..aee42d7ae8a2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c index 03cbe4cf110b..03cbe4cf110b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h index 4422e31fedd9..4422e31fedd9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index 43fcb25c885f..43fcb25c885f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h index 864806c19ca7..864806c19ca7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c index 918b1d129e77..918b1d129e77 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/main.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c index 77e61b19bf36..77e61b19bf36 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h index 64bc49f4dbc6..64bc49f4dbc6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile index c0cb0cfe7d37..c0cb0cfe7d37 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h index 690a7a1675e2..690a7a1675e2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h +++ 
b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c index 09898cf2e07a..09898cf2e07a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h index 38ba707015f5..38ba707015f5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index 04eb5c3f8464..04eb5c3f8464 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h index 98a086822aac..98a086822aac 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c index 8283e9b27639..8283e9b27639 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h index c5761066d383..c5761066d383 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c index 1ee5a6ae9960..1ee5a6ae9960 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h index e5e1353a94c3..e5e1353a94c3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h index dc8460c0b32f..dc8460c0b32f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c index a9c406f33d0a..a9c406f33d0a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h index ebd72cae10b6..ebd72cae10b6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index de6cb6c3a48c..de6cb6c3a48c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h index d2367a5d0cf5..d2367a5d0cf5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.c 
b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c index 752f943a84ae..752f943a84ae 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h index 8b79161f71be..8b79161f71be 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 84ddd4d07a1d..84ddd4d07a1d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h index 4bec4b07e3e0..4bec4b07e3e0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile index ad2de6b839ef..ad2de6b839ef 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h index 74a479ac323d..74a479ac323d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c index c16209a336ea..c16209a336ea 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h index fafa6bac2a3f..fafa6bac2a3f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 25db369b5d18..34ce06441d1b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1946,6 +1946,14 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val); mac->rx_data_filter = *(u16 *)val; break; + case HW_VAR_KEEP_ALIVE:{ + u8 array[2]; + array[0] = 0xff; + array[1] = *((u8 *)val); + rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, + array); + break; + } default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h index 67588083e6cc..67588083e6cc 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c index 75a2deb23af1..75a2deb23af1 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h index 0f372278b7af..0f372278b7af 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h diff --git 
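/*
 * Note on the rtl8192cu hw.c hunk above: the new HW_VAR_KEEP_ALIVE case
 * builds a two-byte payload (a 0xff marker followed by the caller-supplied
 * value) and hands it to the firmware via the H2C keep-alive command.
 * The sketch below only mirrors that packing; fill_h2c_cmd() and the
 * command id are hypothetical stand-ins for rtl92c_fill_h2c_cmd() and
 * H2C_92C_KEEP_ALIVE_CTRL, not the driver's real interface.
 */
#include <stdint.h>
#include <stdio.h>

#define KEEP_ALIVE_CMD_ID 3	/* placeholder id, not the real H2C number */

/* Hypothetical stand-in: just dumps what would be sent to the firmware. */
static void fill_h2c_cmd(unsigned int cmd_id, unsigned int len,
			 const uint8_t *payload)
{
	printf("H2C cmd %u (%u bytes):", cmd_id, len);
	for (unsigned int i = 0; i < len; i++)
		printf(" %02x", payload[i]);
	printf("\n");
}

/* Mirrors the HW_VAR_KEEP_ALIVE case: 0xff marker followed by the value. */
static void set_keep_alive(uint8_t value)
{
	uint8_t array[2];

	array[0] = 0xff;
	array[1] = value;
	fill_h2c_cmd(KEEP_ALIVE_CMD_ID, 2, array);
}

int main(void)
{
	set_keep_alive(10);	/* arbitrary example value */
	return 0;
}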
a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index 035713311a4a..035713311a4a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h index 553a4bfac668..553a4bfac668 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c index c972fa50926d..c972fa50926d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h index 42b068660483..42b068660483 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h index 8b81465c629b..8b81465c629b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c index 5624ade92cc0..5624ade92cc0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h index 6f987de5b441..6f987de5b441 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index fd4a5353d216..fd4a5353d216 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h index a1310abd0d54..a1310abd0d54 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c index 7903c154de00..7903c154de00 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h index 4b020e9e30b1..4b020e9e30b1 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 95880fe4106e..95880fe4106e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index fd8051dcd98a..fd8051dcd98a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile index 
e3213c8264b6..e3213c8264b6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h index 0a443ed17cf4..0a443ed17cf4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c index 7c1db7e7572d..7c1db7e7572d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h index f2d318ceeb28..f2d318ceeb28 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 62ef8209718f..62ef8209718f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h index 8a38daa316cb..8a38daa316cb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index f49b60d31450..f49b60d31450 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h index 1bc7b1a96d4a..1bc7b1a96d4a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c index 76a57ae4af3e..76a57ae4af3e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h index a29df30c3025..a29df30c3025 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index bb06fe836fe7..bb06fe836fe7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h index 48d5c6835b6a..48d5c6835b6a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h index 315a298bab06..315a298bab06 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c index 6a6ac540d5b5..6a6ac540d5b5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c diff --git 
a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h index 7303d12c266f..7303d12c266f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index b19d0398215f..b19d0398215f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h index 0e6035b8fd86..0e6035b8fd86 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c index 8ea6f528dfa6..8ea6f528dfa6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h index 8b724a86117a..8b724a86117a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 1feaa629dd4f..1feaa629dd4f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h index fb5cf0634e8d..fb5cf0634e8d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile index 0315eeda9b60..0315eeda9b60 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h index 60f5728b4e2d..60f5728b4e2d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c index 459f3d0efa2f..459f3d0efa2f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h index 107d5a488fa8..107d5a488fa8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 0708eedd9671..0708eedd9671 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h index 069da1e7e80a..069da1e7e80a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index 5f14308e8eb3..5f14308e8eb3 
100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h index 05413f189685..05413f189685 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c index 8388e371c8e2..8388e371c8e2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h index 8ef640a2ef7f..8ef640a2ef7f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c index 018340aedf09..018340aedf09 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h index c6e97c8df54c..c6e97c8df54c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c index 1a701d007f0c..1a701d007f0c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h index 781eeaa6af49..781eeaa6af49 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h index 1eaa1fab550d..1eaa1fab550d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c index c9bc33cd1090..c9bc33cd1090 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h index 039c0133ad6b..039c0133ad6b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index c31c6bfb536d..c31c6bfb536d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h index 21433d0332d0..21433d0332d0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c index abcdd0670fd8..abcdd0670fd8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c diff 
--git a/drivers/net/wireless/rtlwifi/rtl8192ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h index bff9df88815d..bff9df88815d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index d39ee67f6113..d39ee67f6113 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h index 8f78ac9e6040..8f78ac9e6040 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile index b7eb13819cbc..b7eb13819cbc 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h index 41466f957cdc..41466f957cdc 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c index 9bae5a92e30f..9bae5a92e30f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h index de6ac796c74d..de6ac796c74d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c index 331b1584a1a2..331b1584a1a2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h index b1e44b86e8ed..b1e44b86e8ed 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index 12b0978ba4fa..12b0978ba4fa 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h index 4cacee10f31e..4cacee10f31e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c index 44949b5cbb87..44949b5cbb87 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h index 2182dbeb5f32..2182dbeb5f32 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c index 
4b4612fe2fdb..4b4612fe2fdb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h index 8acf4765a7a6..8acf4765a7a6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h index e13043479b71..e13043479b71 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c index 78a81c1e390b..78a81c1e390b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h index 8a29eb94ab17..8a29eb94ab17 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index e1fd27c888bf..e1fd27c888bf 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h index 2eb88862ebe4..2eb88862ebe4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c index f1a73f75127e..f1a73f75127e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h index 2feb73b71a4f..2feb73b71a4f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index 125b29bd2f93..125b29bd2f93 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h index 5a13f17e3b41..5a13f17e3b41 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile index 6220672a96f4..6220672a96f4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h index 06c448c010fd..06c448c010fd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h index bcdf2273688e..bcdf2273688e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h +++ 
b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c index 4c1c96c96a5a..4c1c96c96a5a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h index 57111052e86b..57111052e86b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c index b7c0d38ee5b5..b7c0d38ee5b5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h index 9d1fe25db953..9d1fe25db953 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c index 5aac45d5a974..5aac45d5a974 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h index bcd64a22acc0..bcd64a22acc0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c index 00a0531cc5f4..00a0531cc5f4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h index 3723d7476717..3723d7476717 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index a4b7eac6856f..a4b7eac6856f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h index 32c1ace97c3f..32c1ace97c3f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c index 13173351cbfd..13173351cbfd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h index c22b19f542a6..c22b19f542a6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c index d367097f490b..d367097f490b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h index b85f5c7c5c01..b85f5c7c5c01 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c index 2f7f81af8a55..2f7f81af8a55 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h index 4ac7db526f15..4ac7db526f15 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h index 306059f9b9cc..306059f9b9cc 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c index 9ebc8281ff99..9ebc8281ff99 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h index 7b44ebc0fac9..7b44ebc0fac9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 3859b3e3d158..3859b3e3d158 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h index 46478780d262..46478780d262 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c index 61e86045f15c..61e86045f15c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h index 57a548ceba7d..57a548ceba7d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index 2f7c144d7980..2f7c144d7980 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h index 017da7e194d8..017da7e194d8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile index a77c34102792..a77c34102792 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile diff --git 
a/drivers/net/wireless/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h index 025ea5c0f3f6..025ea5c0f3f6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c index 3a81cdba8ca3..3a81cdba8ca3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h index f752a2cad63d..f752a2cad63d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index d5da0f3c1217..d5da0f3c1217 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h index 067429669bda..067429669bda 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index c983d2fe147f..c983d2fe147f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h index eae863d08de8..eae863d08de8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c index 4196efb723a2..4196efb723a2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h index c57de379ee8d..c57de379ee8d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index b7b73cbe346d..b7b73cbe346d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h index 9021d4745ab7..9021d4745ab7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c index a1bb1f6116fb..a1bb1f6116fb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h index 0fee5e0e55c2..0fee5e0e55c2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h index 03581d2a5da0..03581d2a5da0 
100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c index 5ed4492d3c80..5ed4492d3c80 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h index f423e157020f..f423e157020f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index d091f1d5f91e..d091f1d5f91e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h index a7b25e769950..a7b25e769950 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c index a180761e8810..a180761e8810 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h index dc17001632f7..dc17001632f7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 338ec9a9d09b..338ec9a9d09b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h index 45949ac4854c..45949ac4854c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile index 345a68adcf38..345a68adcf38 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c index 064340641913..064340641913 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h index 5c1b94ce2f86..5c1b94ce2f86 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c index a2f5e89bedfe..a2f5e89bedfe 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h index 8ea372d1626e..8ea372d1626e 100644 --- 
a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c index 9014a94fac6a..9014a94fac6a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/main.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c index 75cbd1509b52..75cbd1509b52 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h index 83b891a9adb8..83b891a9adb8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile index f7a26f71197e..f7a26f71197e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h index dfbdf539de1a..dfbdf539de1a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index b57cfd965196..b57cfd965196 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h index 625a6bbb21fc..625a6bbb21fc 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index 525eb234627c..525eb234627c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h index 8f5b4aade3c9..8f5b4aade3c9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index b7f18e2155eb..6e9418ed90c2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -2253,11 +2253,28 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) } } +static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 tmp = rtl_read_dword(rtlpriv, REG_HISR); + + rtl_write_dword(rtlpriv, REG_HISR, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HISRE); + rtl_write_dword(rtlpriv, REG_HISRE, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HSISR); + rtl_write_dword(rtlpriv, REG_HSISR, tmp); +} + void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + if (!rtlpci->int_clear) + rtl8821ae_clear_interrupt(hw);/*clear it here first*/ + rtl_write_dword(rtlpriv, 
REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h index a3553e3abaa1..a3553e3abaa1 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c index ba1946a0280e..ba1946a0280e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h index 038e64e18ae8..038e64e18ae8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 9b4d8a637915..9b4d8a637915 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h index c411f0a95cc4..c411f0a95cc4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c index 9ddf78a187dd..9ddf78a187dd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h index 36b3e91d996e..36b3e91d996e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h index 1d6110f9c1fb..1d6110f9c1fb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c index 2922538160e5..2922538160e5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h index efd22bd0b139..efd22bd0b139 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index a4988121e1ab..8ee141a55bc5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -96,6 +96,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtl8821ae_bt_reg_init(hw); rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear; rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; @@ -167,6 +168,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = 
rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpci->msi_support = rtlpriv->cfg->mod_params->int_clear; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; @@ -308,6 +310,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = { .swctrl_lps = false, .fwctrl_lps = true, .msi_support = true, + .int_clear = true, .debug = DBG_EMERG, .disable_watchdog = 0, }; @@ -437,6 +440,7 @@ module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444); module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog, bool, 0444); +module_param_named(int_clear, rtl8821ae_mod_params.int_clear, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); @@ -444,6 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); +MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h index d001e7ce3052..d001e7ce3052 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c index 62a0fb76f080..62a0fb76f080 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h index 24bcff6bc507..24bcff6bc507 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 174743aef943..174743aef943 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h index 31409042d8dd..31409042d8dd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/realtek/rtlwifi/stats.c index d8b30690b00d..d8b30690b00d 100644 --- a/drivers/net/wireless/rtlwifi/stats.c +++ b/drivers/net/wireless/realtek/rtlwifi/stats.c diff --git a/drivers/net/wireless/rtlwifi/stats.h b/drivers/net/wireless/realtek/rtlwifi/stats.h index 2b57dffef572..2b57dffef572 100644 --- a/drivers/net/wireless/rtlwifi/stats.h +++ b/drivers/net/wireless/realtek/rtlwifi/stats.h diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 2721cf89fb16..2721cf89fb16 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c diff --git 
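/*
 * Note on the rtl8821ae hunks above: before unmasking interrupts in
 * rtl8821ae_enable_interrupt(), the patch optionally acknowledges any
 * pending status bits by reading REG_HISR/REG_HISRE/REG_HSISR and writing
 * the values back, gated by the new int_clear module parameter that sw.c
 * copies into rtl_pci (the second sw.c hunk appears to assign it to
 * msi_support rather than int_clear, which looks like a slip in the patch
 * as posted). The sketch below mirrors that clear-before-enable pattern
 * against a mocked register file; the register names and the assumed
 * write-1-to-clear behaviour are illustrative, not the driver's real
 * MMIO layer.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mocked 32-bit status/mask registers (indices, not real offsets). */
enum { REG_HISR, REG_HISRE, REG_HSISR, REG_HIMR, REG_HIMRE, NUM_REGS };

static uint32_t regs[NUM_REGS] = { 0x5, 0x0, 0x2, 0x0, 0x0 };

static uint32_t read_dword(int reg)
{
	return regs[reg];
}

static void write_dword(int reg, uint32_t val)
{
	if (reg == REG_HISR || reg == REG_HISRE || reg == REG_HSISR)
		regs[reg] &= ~val;	/* assumed write-1-to-clear semantics */
	else
		regs[reg] = val;
}

/* Acknowledge anything already pending, as rtl8821ae_clear_interrupt() does. */
static void clear_pending_interrupts(void)
{
	write_dword(REG_HISR,  read_dword(REG_HISR));
	write_dword(REG_HISRE, read_dword(REG_HISRE));
	write_dword(REG_HSISR, read_dword(REG_HSISR));
}

static void enable_interrupts(bool int_clear, uint32_t mask0, uint32_t mask1)
{
	if (!int_clear)		/* as in the patch: clear only when int_clear is false */
		clear_pending_interrupts();
	write_dword(REG_HIMR,  mask0);
	write_dword(REG_HIMRE, mask1);
}

int main(void)
{
	enable_interrupts(false, 0xffffffff, 0xffffffff);
	/* stale status bits were acked before the mask was written */
	printf("HISR after enable: 0x%x\n", (unsigned)regs[REG_HISR]);
	return 0;
}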
a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h index 685273ca9561..685273ca9561 100644 --- a/drivers/net/wireless/rtlwifi/usb.h +++ b/drivers/net/wireless/realtek/rtlwifi/usb.h diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index b90ca618b123..4544752a2ba8 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2249,6 +2249,9 @@ struct rtl_mod_params { /* default 0: 1 means disable */ bool disable_watchdog; + + /* default 0: 1 means do not disable interrupts */ + bool int_clear; }; struct rtl_hal_usbint_cfg { diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 5932306084fd..bf9afbf46c1b 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -1114,6 +1114,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0db0, 0x871c) }, { USB_DEVICE(0x0db0, 0x899a) }, /* Ovislink */ + { USB_DEVICE(0x1b75, 0x3070) }, { USB_DEVICE(0x1b75, 0x3071) }, { USB_DEVICE(0x1b75, 0x3072) }, { USB_DEVICE(0x1b75, 0xa200) }, diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 929a6e7e5ecf..56ebd8267386 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -788,6 +788,12 @@ static void connect(struct backend_info *be) /* Use the number of queues requested by the frontend */ be->vif->queues = vzalloc(requested_num_queues * sizeof(struct xenvif_queue)); + if (!be->vif->queues) { + xenbus_dev_fatal(dev, -ENOMEM, + "allocating queues"); + return; + } + be->vif->num_queues = requested_num_queues; be->vif->stalled_queues = requested_num_queues; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 9bf63c27a9b7..441b158d04f7 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1706,19 +1706,19 @@ static void xennet_destroy_queues(struct netfront_info *info) } static int xennet_create_queues(struct netfront_info *info, - unsigned int num_queues) + unsigned int *num_queues) { unsigned int i; int ret; - info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), + info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue), GFP_KERNEL); if (!info->queues) return -ENOMEM; rtnl_lock(); - for (i = 0; i < num_queues; i++) { + for (i = 0; i < *num_queues; i++) { struct netfront_queue *queue = &info->queues[i]; queue->id = i; @@ -1728,7 +1728,7 @@ static int xennet_create_queues(struct netfront_info *info, if (ret < 0) { dev_warn(&info->netdev->dev, "only created %d queues\n", i); - num_queues = i; + *num_queues = i; break; } @@ -1738,11 +1738,11 @@ static int xennet_create_queues(struct netfront_info *info, napi_enable(&queue->napi); } - netif_set_real_num_tx_queues(info->netdev, num_queues); + netif_set_real_num_tx_queues(info->netdev, *num_queues); rtnl_unlock(); - if (num_queues == 0) { + if (*num_queues == 0) { dev_err(&info->netdev->dev, "no queues\n"); return -EINVAL; } @@ -1788,7 +1788,7 @@ static int talk_to_netback(struct xenbus_device *dev, if (info->queues) xennet_destroy_queues(info); - err = xennet_create_queues(info, num_queues); + err = xennet_create_queues(info, &num_queues); if (err < 0) goto destroy_ring; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index d3c6676b3c0c..6fd4e5a5ef4a 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -67,7 +67,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, 
int rc; /* Stop the user from reading */ - if (pos > nvmem->size) + if (pos >= nvmem->size) return 0; if (pos + count > nvmem->size) @@ -92,7 +92,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, int rc; /* Stop the user from writing */ - if (pos > nvmem->size) + if (pos >= nvmem->size) return 0; if (pos + count > nvmem->size) @@ -825,7 +825,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem, return rc; /* shift bits in-place */ - if (cell->bit_offset || cell->bit_offset) + if (cell->bit_offset || cell->nbits) nvmem_shift_read_buffer_in_place(cell, buf); *len = cell->bytes; @@ -938,7 +938,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); /* free the tmp buffer */ - if (cell->bit_offset) + if (cell->bit_offset || cell->nbits) kfree(buf); if (IS_ERR_VALUE(rc)) diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index 14777dd5212d..cfa3b85064dd 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c @@ -103,7 +103,7 @@ static int sunxi_sid_probe(struct platform_device *pdev) struct nvmem_device *nvmem; struct regmap *regmap; struct sunxi_sid *sid; - int i, size; + int ret, i, size; char *randomness; sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); @@ -131,6 +131,11 @@ static int sunxi_sid_probe(struct platform_device *pdev) return PTR_ERR(nvmem); randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); + if (!randomness) { + ret = -EINVAL; + goto err_unreg_nvmem; + } + for (i = 0; i < size; i++) randomness[i] = sunxi_sid_read_byte(sid, i); @@ -140,6 +145,10 @@ static int sunxi_sid_probe(struct platform_device *pdev) platform_set_drvdata(pdev, nvmem); return 0; + +err_unreg_nvmem: + nvmem_unregister(nvmem); + return ret; } static int sunxi_sid_remove(struct platform_device *pdev) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index d4497141d083..4a7da3c3e035 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1243,6 +1243,10 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) BUG_ON(!chip); if (!chip->irq_write_msi_msg) chip->irq_write_msi_msg = pci_msi_domain_write_msg; + if (!chip->irq_mask) + chip->irq_mask = pci_msi_mask_irq; + if (!chip->irq_unmask) + chip->irq_unmask = pci_msi_unmask_irq; } /** diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c index 0062027afb1e..77a2e054fdea 100644 --- a/drivers/phy/phy-berlin-sata.c +++ b/drivers/phy/phy-berlin-sata.c @@ -276,6 +276,7 @@ static const struct of_device_id phy_berlin_sata_of_match[] = { { .compatible = "marvell,berlin2q-sata-phy" }, { }, }; +MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match); static struct platform_driver phy_berlin_sata_driver = { .probe = phy_berlin_sata_probe, diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c index 49a1ed0cef56..107cb57c3513 100644 --- a/drivers/phy/phy-qcom-ufs.c +++ b/drivers/phy/phy-qcom-ufs.c @@ -432,6 +432,7 @@ out_disable_src: out: return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk); static int ufs_qcom_phy_disable_vreg(struct phy *phy, @@ -474,6 +475,7 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy) phy->is_ref_clk_enabled = false; } } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk); #define UFS_REF_CLK_EN (1 << 5) @@ -517,11 +519,13 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy) { ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk); void 
ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) { ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk); /* Turn ON M-PHY RMMI interface clocks */ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) @@ -550,6 +554,7 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) out: return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk); /* Turn OFF M-PHY RMMI interface clocks */ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) @@ -562,6 +567,7 @@ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) phy->is_iface_clk_enabled = false; } } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk); int ufs_qcom_phy_start_serdes(struct phy *generic_phy) { @@ -578,6 +584,7 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes); int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) { @@ -595,6 +602,7 @@ int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable); void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, u8 major, u16 minor, u16 step) @@ -605,6 +613,7 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, ufs_qcom_phy->host_ctrl_rev_minor = minor; ufs_qcom_phy->host_ctrl_rev_step = step; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version); int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) { @@ -625,6 +634,7 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy); int ufs_qcom_phy_remove(struct phy *generic_phy, struct ufs_qcom_phy *ufs_qcom_phy) @@ -662,6 +672,7 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy) return ufs_qcom_phy->phy_spec_ops-> is_physical_coding_sublayer_ready(ufs_qcom_phy); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready); int ufs_qcom_phy_power_on(struct phy *generic_phy) { diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c index 5a5c073e72fe..91d6f342c565 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c @@ -98,6 +98,7 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) struct device_node *child; struct regmap *grf; unsigned int reg_offset; + int err; grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); if (IS_ERR(grf)) { @@ -129,6 +130,11 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) return PTR_ERR(rk_phy->phy); } phy_set_drvdata(rk_phy->phy, rk_phy); + + /* only power up usb phy when it use, so disable it when init*/ + err = rockchip_usb_phy_power(rk_phy, 1); + if (err) + return err; } phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c index faf635654312..293ed4381cc0 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx25.c +++ b/drivers/pinctrl/freescale/pinctrl-imx25.c @@ -26,7 +26,8 @@ #include "pinctrl-imx.h" enum imx25_pads { - MX25_PAD_RESERVE0 = 1, + MX25_PAD_RESERVE0 = 0, + MX25_PAD_RESERVE1 = 1, MX25_PAD_A10 = 2, MX25_PAD_A13 = 3, MX25_PAD_A14 = 4, @@ -169,6 +170,7 @@ enum imx25_pads { /* Pad names for the pinmux subsystem */ static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = { IMX_PINCTRL_PIN(MX25_PAD_RESERVE0), + IMX_PINCTRL_PIN(MX25_PAD_RESERVE1), IMX_PINCTRL_PIN(MX25_PAD_A10), IMX_PINCTRL_PIN(MX25_PAD_A13), 
IMX_PINCTRL_PIN(MX25_PAD_A14), diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c index 63676617bc59..f9a3f8f446f7 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c @@ -653,7 +653,7 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = { SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */ - SUNXI_FUNCTION(0x3, "uart3"), /* PWM1 */ + SUNXI_FUNCTION(0x3, "pwm"), /* PWM1 */ SUNXI_FUNCTION(0x5, "uart2"), /* CTS */ SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */ }; diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c index 7e9dae54fcb2..2df8bbecebfc 100644 --- a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c +++ b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c @@ -22,49 +22,49 @@ #define DRIVER_NAME "ph1-sld8-pinctrl" static const struct pinctrl_pin_desc ph1_sld8_pins[] = { - UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(0, "PCA00", 0, 15, UNIPHIER_PIN_DRV_4_8, 15, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(1, "PCA01", 0, 16, UNIPHIER_PIN_DRV_4_8, 16, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(2, "PCA02", 0, 17, UNIPHIER_PIN_DRV_4_8, 17, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(3, "PCA03", 0, 18, UNIPHIER_PIN_DRV_4_8, 18, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(4, "PCA04", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(4, "PCA04", 0, 19, UNIPHIER_PIN_DRV_4_8, 19, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(5, "PCA05", 0, 20, UNIPHIER_PIN_DRV_4_8, 20, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(6, "PCA06", 0, 21, UNIPHIER_PIN_DRV_4_8, 21, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(7, "PCA07", 0, 22, UNIPHIER_PIN_DRV_4_8, 22, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(8, "PCA08", 0, 23, UNIPHIER_PIN_DRV_4_8, 23, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(9, "PCA09", 0, 24, UNIPHIER_PIN_DRV_4_8, 24, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(10, "PCA10", 0, 25, UNIPHIER_PIN_DRV_4_8, 25, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(11, "PCA11", 0, 26, UNIPHIER_PIN_DRV_4_8, 26, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(12, "PCA12", 0, 27, UNIPHIER_PIN_DRV_4_8, 27, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(13, "PCA13", 0, 28, UNIPHIER_PIN_DRV_4_8, 28, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(14, "PCA14", 0, 29, UNIPHIER_PIN_DRV_4_8, 29, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE, @@ -118,199 +118,199 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE, 36, UNIPHIER_PIN_DRV_8_12_16_20, 128, 
UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(32, "SDCLK", 8, 40, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(33, "SDCMD", 8, 44, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(34, "SDDAT0", 8, 48, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(35, "SDDAT1", 8, 52, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(36, "SDDAT2", 8, 56, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(37, "SDDAT3", 8, 60, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(38, "SDCD", 8, -1, UNIPHIER_PIN_DRV_FIXED_4, 129, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(39, "SDWP", 8, -1, UNIPHIER_PIN_DRV_FIXED_4, 130, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(40, "SDVOLC", 9, -1, UNIPHIER_PIN_DRV_FIXED_4, 131, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", 0, 37, UNIPHIER_PIN_DRV_4_8, 37, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(42, "USB0OD", 0, 38, UNIPHIER_PIN_DRV_4_8, 38, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", 0, 39, UNIPHIER_PIN_DRV_4_8, 39, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(44, "USB1OD", 0, 40, UNIPHIER_PIN_DRV_4_8, 40, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(45, "PCRESET", 0, 41, UNIPHIER_PIN_DRV_4_8, 41, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(46, "PCREG", 0, 42, UNIPHIER_PIN_DRV_4_8, 42, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(47, "PCCE2", 0, 43, UNIPHIER_PIN_DRV_4_8, 43, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(48, "PCVS1", 0, 44, UNIPHIER_PIN_DRV_4_8, 44, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(49, "PCCD2", 0, 45, UNIPHIER_PIN_DRV_4_8, 45, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(50, "PCCD1", 0, 46, UNIPHIER_PIN_DRV_4_8, 46, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(51, "PCREADY", 0, 47, UNIPHIER_PIN_DRV_4_8, 47, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(52, "PCDOE", 0, 48, UNIPHIER_PIN_DRV_4_8, 48, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(53, "PCCE1", 0, 49, UNIPHIER_PIN_DRV_4_8, 49, UNIPHIER_PIN_PULL_DOWN), - 
UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(54, "PCWE", 0, 50, UNIPHIER_PIN_DRV_4_8, 50, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(55, "PCOE", 0, 51, UNIPHIER_PIN_DRV_4_8, 51, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(56, "PCWAIT", 0, 52, UNIPHIER_PIN_DRV_4_8, 52, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(57, "PCIOWR", 0, 53, UNIPHIER_PIN_DRV_4_8, 53, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(58, "PCIORD", 0, 54, UNIPHIER_PIN_DRV_4_8, 54, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", 0, 55, UNIPHIER_PIN_DRV_4_8, 55, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", 0, 56, UNIPHIER_PIN_DRV_4_8, 56, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", 0, 57, UNIPHIER_PIN_DRV_4_8, 57, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", 0, 58, UNIPHIER_PIN_DRV_4_8, 58, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", 0, 59, UNIPHIER_PIN_DRV_4_8, 59, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", 0, 60, UNIPHIER_PIN_DRV_4_8, 60, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", 0, 61, UNIPHIER_PIN_DRV_4_8, 61, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", 0, 62, UNIPHIER_PIN_DRV_4_8, 62, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", 0, 63, UNIPHIER_PIN_DRV_4_8, 63, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", 0, 64, UNIPHIER_PIN_DRV_4_8, 64, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", 0, 65, UNIPHIER_PIN_DRV_4_8, 65, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", 0, 66, UNIPHIER_PIN_DRV_4_8, 66, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", 0, 67, UNIPHIER_PIN_DRV_4_8, 67, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", 0, 68, UNIPHIER_PIN_DRV_4_8, 68, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", 0, 69, UNIPHIER_PIN_DRV_4_8, 69, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", 0, 70, UNIPHIER_PIN_DRV_4_8, 70, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", 0, 71, UNIPHIER_PIN_DRV_4_8, 71, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", 
UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", 0, 72, UNIPHIER_PIN_DRV_4_8, 72, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", 0, 73, UNIPHIER_PIN_DRV_4_8, 73, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", 0, 74, UNIPHIER_PIN_DRV_4_8, 74, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", 0, 75, UNIPHIER_PIN_DRV_4_8, 75, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", 0, 76, UNIPHIER_PIN_DRV_4_8, 76, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", 0, 77, UNIPHIER_PIN_DRV_4_8, 77, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", 0, 78, UNIPHIER_PIN_DRV_4_8, 78, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", 0, 79, UNIPHIER_PIN_DRV_4_8, 79, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", 0, 80, UNIPHIER_PIN_DRV_4_8, 80, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", 0, 81, UNIPHIER_PIN_DRV_4_8, 81, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", 0, 82, UNIPHIER_PIN_DRV_4_8, 82, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", 0, 83, UNIPHIER_PIN_DRV_4_8, 83, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", 0, 84, UNIPHIER_PIN_DRV_4_8, 84, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", 0, 85, UNIPHIER_PIN_DRV_4_8, 85, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", 0, 86, UNIPHIER_PIN_DRV_4_8, 86, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", 0, 87, UNIPHIER_PIN_DRV_4_8, 87, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(92, "AGCI", 3, -1, UNIPHIER_PIN_DRV_FIXED_4, 132, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(93, "AGCR", 4, -1, UNIPHIER_PIN_DRV_FIXED_4, 133, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(94, "AGCBS", 5, -1, UNIPHIER_PIN_DRV_FIXED_4, 134, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(95, "IECOUT", 0, 88, UNIPHIER_PIN_DRV_4_8, 88, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(96, "ASMCK", 0, 89, UNIPHIER_PIN_DRV_4_8, 89, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE, @@ -325,31 +325,31 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE, 93, 
UNIPHIER_PIN_DRV_4_8, 93, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0, 94, UNIPHIER_PIN_DRV_4_8, 94, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(102, "SDA0", 10, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(103, "SCL0", 10, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(104, "SDA1", 11, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(105, "SCL1", 11, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", 12, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", 12, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", 13, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", 13, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE, @@ -358,76 +358,76 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE, 96, UNIPHIER_PIN_DRV_4_8, 96, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(112, "SBO1", 0, 97, UNIPHIER_PIN_DRV_4_8, 97, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(113, "SBI1", 0, 98, UNIPHIER_PIN_DRV_4_8, 98, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(114, "TXD1", 0, 99, UNIPHIER_PIN_DRV_4_8, 99, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(115, "RXD1", 0, 100, UNIPHIER_PIN_DRV_4_8, 100, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(116, "HIN", 1, -1, UNIPHIER_PIN_DRV_FIXED_5, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(117, "VIN", 2, -1, UNIPHIER_PIN_DRV_FIXED_5, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(118, "TCON0", 0, 101, UNIPHIER_PIN_DRV_4_8, 101, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(119, "TCON1", 0, 102, UNIPHIER_PIN_DRV_4_8, 102, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(120, "TCON2", 0, 103, UNIPHIER_PIN_DRV_4_8, 103, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(121, "TCON3", 0, 104, UNIPHIER_PIN_DRV_4_8, 104, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(122, "TCON4", 0, 105, UNIPHIER_PIN_DRV_4_8, 105, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(123, "TCON5", 
UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(123, "TCON5", 0, 106, UNIPHIER_PIN_DRV_4_8, 106, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(124, "TCON6", 0, 107, UNIPHIER_PIN_DRV_4_8, 107, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(125, "TCON7", 0, 108, UNIPHIER_PIN_DRV_4_8, 108, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(126, "TCON8", 0, 109, UNIPHIER_PIN_DRV_4_8, 109, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(127, "PWMA", 0, 110, UNIPHIER_PIN_DRV_4_8, 110, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(128, "XIRQ0", 0, 111, UNIPHIER_PIN_DRV_4_8, 111, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(129, "XIRQ1", 0, 112, UNIPHIER_PIN_DRV_4_8, 112, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(130, "XIRQ2", 0, 113, UNIPHIER_PIN_DRV_4_8, 113, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(131, "XIRQ3", 0, 114, UNIPHIER_PIN_DRV_4_8, 114, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(132, "XIRQ4", 0, 115, UNIPHIER_PIN_DRV_4_8, 115, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(133, "XIRQ5", 0, 116, UNIPHIER_PIN_DRV_4_8, 116, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(134, "XIRQ6", 0, 117, UNIPHIER_PIN_DRV_4_8, 117, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(135, "XIRQ7", 0, 118, UNIPHIER_PIN_DRV_4_8, 118, UNIPHIER_PIN_PULL_DOWN), }; diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 01bf3476a791..a9567af7cec0 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = { AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20, AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20, - AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), + AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)), AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50, - AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)), + AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)), /* secondary switchable output of DCDC1 */ AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100, AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)), diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7849187d91ae..8a34f6acc801 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1403,6 +1403,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) return 0; } + /* Did the lookup explicitly defer for us? 
*/ + if (ret == -EPROBE_DEFER) + return ret; + if (have_full_constraints()) { r = dummy_regulator_rdev; } else { diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index add419d6ff34..a56a7b243e91 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -212,6 +212,17 @@ static const struct file_operations twa_fops = { .llseek = noop_llseek, }; +/* + * The controllers use an inline buffer instead of a mapped SGL for small, + * single entry buffers. Note that we treat a zero-length transfer like + * a mapped SGL. + */ +static bool twa_command_mapped(struct scsi_cmnd *cmd) +{ + return scsi_sg_count(cmd) != 1 || + scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH; +} + /* This function will complete an aen request from the isr */ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) { @@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) } /* Now complete the io */ - scsi_dma_unmap(cmd); + if (twa_command_mapped(cmd)) + scsi_dma_unmap(cmd); cmd->scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); @@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) struct scsi_cmnd *cmd = tw_dev->srb[i]; cmd->result = (DID_RESET << 16); - scsi_dma_unmap(cmd); + if (twa_command_mapped(cmd)) + scsi_dma_unmap(cmd); cmd->scsi_done(cmd); } } @@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); switch (retval) { case SCSI_MLQUEUE_HOST_BUSY: - scsi_dma_unmap(SCpnt); + if (twa_command_mapped(SCpnt)) + scsi_dma_unmap(SCpnt); twa_free_request_id(tw_dev, request_id); break; case 1: SCpnt->result = (DID_ERROR << 16); - scsi_dma_unmap(SCpnt); + if (twa_command_mapped(SCpnt)) + scsi_dma_unmap(SCpnt); done(SCpnt); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); @@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, /* Map sglist from scsi layer to cmd packet */ if (scsi_sg_count(srb)) { - if ((scsi_sg_count(srb) == 1) && - (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) { + if (!twa_command_mapped(srb)) { if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) scsi_sg_copy_to_buffer(srb, @@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re { struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH && + if (!twa_command_mapped(cmd) && (cmd->sc_data_direction == DMA_FROM_DEVICE || cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { if (scsi_sg_count(cmd) == 1) { diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index de6feb8964c9..804806e1cbb4 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -160,7 +160,7 @@ static struct scsi_transport_template *cxgb4i_stt; #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #define RCV_BUFSIZ_MASK 0x3FFU -#define MAX_IMM_TX_PKT_LEN 128 +#define MAX_IMM_TX_PKT_LEN 256 static int push_tx_frames(struct cxgbi_sock *, int); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 33c74d3436c9..6bffd91b973a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -976,13 +976,13 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) wake_up(&conn->ehwait); } -static void iscsi_send_nopout(struct iscsi_conn *conn, struct 
iscsi_nopin *rhdr) +static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) { struct iscsi_nopout hdr; struct iscsi_task *task; if (!rhdr && conn->ping_task) - return; + return -EINVAL; memset(&hdr, 0, sizeof(struct iscsi_nopout)); hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; @@ -996,13 +996,16 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) hdr.ttt = RESERVED_ITT; task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); - if (!task) + if (!task) { iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); - else if (!rhdr) { + return -EIO; + } else if (!rhdr) { /* only track our nops */ conn->ping_task = task; conn->last_ping = jiffies; } + + return 0; } static int iscsi_nop_out_rsp(struct iscsi_task *task, @@ -2092,8 +2095,10 @@ static void iscsi_check_transport_timeouts(unsigned long data) if (time_before_eq(last_recv + recv_timeout, jiffies)) { /* send a ping to try to provoke some traffic */ ISCSI_DBG_CONN(conn, "Sending nopout as ping\n"); - iscsi_send_nopout(conn, NULL); - next_timeout = conn->last_ping + (conn->ping_timeout * HZ); + if (iscsi_send_nopout(conn, NULL)) + next_timeout = jiffies + (1 * HZ); + else + next_timeout = conn->last_ping + (conn->ping_timeout * HZ); } else next_timeout = last_recv + recv_timeout; diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index edb044a7b56d..0a2168e69bbc 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -111,7 +111,7 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name) dh = __scsi_dh_lookup(name); if (!dh) { - request_module(name); + request_module("scsi_dh_%s", name); dh = __scsi_dh_lookup(name); } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index cbfc5990052b..126a48c6431e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req) static void scsi_mq_done(struct scsi_cmnd *cmd) { trace_scsi_dispatch_cmd_done(cmd); - blk_mq_complete_request(cmd->request); + blk_mq_complete_request(cmd->request, cmd->request->errors); } static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 3cf9faa6cc3f..a85d863d4a44 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev) goto free_master; } - dspi->irq = platform_get_irq(pdev, 0); - if (dspi->irq <= 0) { + ret = platform_get_irq(pdev, 0); + if (ret == 0) ret = -EINVAL; + if (ret < 0) goto free_master; - } + dspi->irq = ret; ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq, dummy_thread_fn, 0, dev_name(&pdev->dev), dspi); diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 769b61193d87..a9bc6e23fc25 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c @@ -224,7 +224,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) prefetchw(&page->flags); ret = add_to_page_cache_lru(page, inode->i_mapping, offset, - GFP_KERNEL); + GFP_NOFS); if (ret == 0) { unlock_page(page); } else { diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c index 4299cf45f947..5e1f16c36b49 100644 --- a/drivers/staging/speakup/fakekey.c +++ b/drivers/staging/speakup/fakekey.c @@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void) __this_cpu_write(reporting_keystroke, true); 
input_report_key(virt_keyboard, KEY_DOWN, PRESSED); input_report_key(virt_keyboard, KEY_DOWN, RELEASED); + input_sync(virt_keyboard); __this_cpu_write(reporting_keystroke, false); /* reenable preemption */ diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 20932cc9c8f7..b09023b07169 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty) spin_lock_irqsave(&tty->ctrl_lock, flags); tty->ctrl_status |= TIOCPKT_FLUSHREAD; spin_unlock_irqrestore(&tty->ctrl_lock, flags); - if (waitqueue_active(&tty->link->read_wait)) - wake_up_interruptible(&tty->link->read_wait); + wake_up_interruptible(&tty->link->read_wait); } } @@ -1382,8 +1381,7 @@ handle_newline: put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->read_wait, POLLIN); return 0; } } @@ -1667,8 +1665,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->read_wait, POLLIN); } } @@ -1887,10 +1884,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) } /* The termios change make the tty ready for I/O */ - if (waitqueue_active(&tty->write_wait)) - wake_up_interruptible(&tty->write_wait); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible(&tty->read_wait); + wake_up_interruptible(&tty->write_wait); + wake_up_interruptible(&tty->read_wait); } /** diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index b1e0ba3e525b..0bbf34035d6a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -261,6 +261,14 @@ configured less than Maximum supported fifo bytes */ UART_FCR7_64BYTE, .flags = UART_CAP_FIFO, }, + [PORT_RT2880] = { + .name = "Palmchip BK-3103", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, }; /* Uart divisor latch read */ diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 5ca5cf3e9359..538ea03bc101 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -2786,7 +2786,7 @@ static int atmel_serial_probe(struct platform_device *pdev) ret = atmel_init_gpios(port, &pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "Failed to initialize GPIOs."); - goto err; + goto err_clear_bit; } ret = atmel_init_port(port, pdev); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index fe3d41cc8416..d0388a071ba1 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1631,12 +1631,12 @@ imx_console_write(struct console *co, const char *s, unsigned int count) int locked = 1; int retval; - retval = clk_prepare_enable(sport->clk_per); + retval = clk_enable(sport->clk_per); if (retval) return; - retval = clk_prepare_enable(sport->clk_ipg); + retval = clk_enable(sport->clk_ipg); if (retval) { - clk_disable_unprepare(sport->clk_per); + clk_disable(sport->clk_per); return; } @@ -1675,8 +1675,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count) if (locked) 
spin_unlock_irqrestore(&sport->port.lock, flags); - clk_disable_unprepare(sport->clk_ipg); - clk_disable_unprepare(sport->clk_per); + clk_disable(sport->clk_ipg); + clk_disable(sport->clk_per); } /* @@ -1777,7 +1777,15 @@ imx_console_setup(struct console *co, char *options) retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); - clk_disable_unprepare(sport->clk_ipg); + clk_disable(sport->clk_ipg); + if (retval) { + clk_unprepare(sport->clk_ipg); + goto error_console; + } + + retval = clk_prepare(sport->clk_per); + if (retval) + clk_disable_unprepare(sport->clk_ipg); error_console: return retval; diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 5a3fa8913880..a660ab181cca 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -242,7 +242,10 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) atomic_inc(&buf->priority); mutex_lock(&buf->lock); - while ((next = buf->head->next) != NULL) { + /* paired w/ release in __tty_buffer_request_room; ensures there are + * no pending memory accesses to the freed buffer + */ + while ((next = smp_load_acquire(&buf->head->next)) != NULL) { tty_buffer_free(port, buf->head); buf->head = next; } @@ -290,7 +293,10 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size, if (n != NULL) { n->flags = flags; buf->tail = n; - b->commit = b->used; + /* paired w/ acquire in flush_to_ldisc(); ensures + * flush_to_ldisc() sees buffer data. + */ + smp_store_release(&b->commit, b->used); /* paired w/ acquire in flush_to_ldisc(); ensures the * latest commit value can be read before the head is * advanced to the next buffer @@ -393,7 +399,10 @@ void tty_schedule_flip(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; - buf->tail->commit = buf->tail->used; + /* paired w/ acquire in flush_to_ldisc(); ensures + * flush_to_ldisc() sees buffer data. + */ + smp_store_release(&buf->tail->commit, buf->tail->used); schedule_work(&buf->work); } EXPORT_SYMBOL(tty_schedule_flip); @@ -467,7 +476,7 @@ static void flush_to_ldisc(struct work_struct *work) struct tty_struct *tty; struct tty_ldisc *disc; - tty = port->itty; + tty = READ_ONCE(port->itty); if (tty == NULL) return; @@ -491,7 +500,10 @@ static void flush_to_ldisc(struct work_struct *work) * is advancing to the next buffer */ next = smp_load_acquire(&head->next); - count = head->commit - head->read; + /* paired w/ release in __tty_buffer_request_room() or in + * tty_buffer_flush(); ensures we see the committed buffer data + */ + count = smp_load_acquire(&head->commit) - head->read; if (!count) { if (next == NULL) { check_other_closed(tty); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 02785d844354..2eefaa6e3e3a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2128,8 +2128,24 @@ retry_open: if (!noctty && current->signal->leader && !current->signal->tty && - tty->session == NULL) - __proc_set_tty(tty); + tty->session == NULL) { + /* + * Don't let a process that only has write access to the tty + * obtain the privileges associated with having a tty as + * controlling terminal (being able to reopen it with full + * access through /dev/tty, being able to perform pushback). + * Many distributions set the group of all ttys to "tty" and + * grant write-only access to all terminals for setgid tty + * binaries, which should not imply full privileges on all ttys. + * + * This could theoretically break old code that performs open() + * on a write-only file descriptor. 
In that case, it might be + * necessary to also permit this if + * inode_permission(inode, MAY_READ) == 0. + */ + if (filp->f_mode & FMODE_READ) + __proc_set_tty(tty); + } spin_unlock_irq(&current->sighand->siglock); read_unlock(&tasklist_lock); tty_unlock(tty); @@ -2418,7 +2434,7 @@ static int fionbio(struct file *file, int __user *p) * Takes ->siglock() when updating signal->tty */ -static int tiocsctty(struct tty_struct *tty, int arg) +static int tiocsctty(struct tty_struct *tty, struct file *file, int arg) { int ret = 0; @@ -2452,6 +2468,13 @@ static int tiocsctty(struct tty_struct *tty, int arg) goto unlock; } } + + /* See the comment in tty_open(). */ + if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) { + ret = -EPERM; + goto unlock; + } + proc_set_tty(tty); unlock: read_unlock(&tasklist_lock); @@ -2844,7 +2867,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) no_tty(); return 0; case TIOCSCTTY: - return tiocsctty(tty, arg); + return tiocsctty(tty, file, arg); case TIOCGPGRP: return tiocgpgrp(tty, real_tty, p); case TIOCSPGRP: @@ -3151,13 +3174,18 @@ struct class *tty_class; static int tty_cdev_add(struct tty_driver *driver, dev_t dev, unsigned int index, unsigned int count) { + int err; + /* init here, since reused cdevs cause crashes */ driver->cdevs[index] = cdev_alloc(); if (!driver->cdevs[index]) return -ENOMEM; - cdev_init(driver->cdevs[index], &tty_fops); + driver->cdevs[index]->ops = &tty_fops; driver->cdevs[index]->owner = driver->owner; - return cdev_add(driver->cdevs[index], dev, count); + err = cdev_add(driver->cdevs[index], dev, count); + if (err) + kobject_put(&driver->cdevs[index]->kobj); + return err; } /** diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index d85abfed84cc..f5a381945db2 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech ConferenceCam CC3000e */ + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT }, + + /* Logitech PTZ Pro Camera */ + { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech Quickcam Fusion */ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* Philips PSC805 audio device */ { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Plantronic Audio 655 DSP */ + { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Plantronic Audio 648 USB */ + { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Artisman Watchdog Dongle */ { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index d1b81539d632..d6199507f861 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -159,8 +159,10 @@ static int ep_bd_list_alloc(struct bdc_ep *ep) bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool, GFP_ATOMIC, &dma); - if (!bd_table->start_bd) + if (!bd_table->start_bd) { + kfree(bd_table); goto fail; + } bd_table->dma = dma; diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index 3ad5d19e4d04..23c794813e6a 100644 --- 
a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data, if (this_time > max) this_time = max; - memcpy(data, dev->buf, this_time); + memcpy(data, dev->buf + dev->used, this_time); dev->used += this_time; diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 7b98e1d9194c..d82fa36c3465 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -476,6 +476,11 @@ static const struct of_device_id usbhs_of_match[] = { .compatible = "renesas,usbhs-r8a7794", .data = (void *)USBHS_TYPE_RCAR_GEN2, }, + { + /* Gen3 is compatible with Gen2 */ + .compatible = "renesas,usbhs-r8a7795", + .data = (void *)USBHS_TYPE_RCAR_GEN2, + }, { }, }; MODULE_DEVICE_TABLE(of, usbhs_of_match); @@ -493,7 +498,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev) return NULL; dparam = &info->driver_param; - dparam->type = of_id ? (u32)of_id->data : 0; + dparam->type = of_id ? (uintptr_t)of_id->data : 0; if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp)) dparam->buswait_bwait = tmp; gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0, diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index 0e5fde1d3ffb..9f9a7bef1ff6 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c @@ -752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev, if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) { dev_err(dev, "Invalid waveform\n"); err = -EINVAL; - goto err_failed; + goto err_fw; } mutex_lock(&(par->io_lock)); @@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev, mutex_unlock(&(par->io_lock)); if (err < 0) { dev_err(dev, "Failed to store broadsheet waveform\n"); - goto err_failed; + goto err_fw; } dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size); - return len; + err = len; +err_fw: + release_firmware(fw_entry); err_failed: return err; } diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index 7fa2e6f9e322..b335c1ae8625 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state) static int fsl_diu_resume(struct platform_device *ofdev) { struct fsl_diu_data *data; + unsigned int i; data = dev_get_drvdata(&ofdev->dev); - enable_lcdc(data->fsl_diu_info); + + fsl_diu_enable_interrupts(data); + update_lcdc(data->fsl_diu_info); + for (i = 0; i < NUM_AOIS; i++) { + if (data->mfb[i].count) + fsl_diu_enable_panel(&data->fsl_diu_info[i]); + } return 0; } diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c index 9b8bebdf8f86..f9ec5c0484fa 100644 --- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c @@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = { { .compatible = "fujitsu,coral", }, { /* end */ } }; +MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl); static struct platform_driver of_platform_mb862xxfb_driver = { .driver = { diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c index a8ce920fa797..d811e6dcaef7 100644 --- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c +++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c @@ 
-294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev) adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); if (adapter_node) { - adapter = of_find_i2c_adapter_by_node(adapter_node); + adapter = of_get_i2c_adapter_by_node(adapter_node); if (adapter == NULL) { dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); omap_dss_put_device(ddata->in); diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c index 90cbc4c3406c..c581231c74a5 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c @@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = { { .compatible = "omapdss,sony,acx565akm", }, {}, }; +MODULE_DEVICE_TABLE(of, acx565akm_of_match); static struct spi_driver acx565akm_driver = { .driver = { diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c index 7ed9a227f5ea..01b43e9ce941 100644 --- a/drivers/video/fbdev/tridentfb.c +++ b/drivers/video/fbdev/tridentfb.c @@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data, writemmr(par, DST1, point(x, y)); writemmr(par, DST2, point(x + w - 1, y + h - 1)); - memcpy(par->io_virt + 0x10000, data, 4 * size); + iowrite32_rep(par->io_virt + 0x10000, data, size); } static void blade_copy_rect(struct tridentfb_par *par, @@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par) static inline void set_lwidth(struct tridentfb_par *par, int width) { write3X4(par, VGA_CRTC_OFFSET, width & 0xFF); - write3X4(par, AddColReg, - (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); + /* chips older than TGUI9660 have only 1 width bit in AddColReg */ + /* touching the other one breaks I2C/DDC */ + if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320) + write3X4(par, AddColReg, + (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4)); + else + write3X4(par, AddColReg, + (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); } /* For resolutions smaller than FP resolution stretch */ diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c index 32d8275e4c88..8a1076beecd3 100644 --- a/drivers/video/of_display_timing.c +++ b/drivers/video/of_display_timing.c @@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np) */ pr_err("%s: error in timing %d\n", of_node_full_name(np), disp->num_timings + 1); + kfree(dt); goto timingfail; }