Diffstat (limited to 'drivers')
387 files changed, 54720 insertions, 5213 deletions
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig index 8b4221cfd118..380a2003231e 100644 --- a/drivers/bcma/Kconfig +++ b/drivers/bcma/Kconfig @@ -26,6 +26,7 @@ config BCMA_HOST_PCI_POSSIBLE config BCMA_HOST_PCI bool "Support for BCMA on PCI-host bus" depends on BCMA_HOST_PCI_POSSIBLE + default y config BCMA_DRIVER_PCI_HOSTMODE bool "Driver for PCI core working in hostmode" diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c index 17b26ce7e051..37a5ffe673d5 100644 --- a/drivers/bcma/core.c +++ b/drivers/bcma/core.c @@ -9,6 +9,25 @@ #include <linux/export.h> #include <linux/bcma/bcma.h> +static bool bcma_core_wait_value(struct bcma_device *core, u16 reg, u32 mask, + u32 value, int timeout) +{ + unsigned long deadline = jiffies + timeout; + u32 val; + + do { + val = bcma_aread32(core, reg); + if ((val & mask) == value) + return true; + cpu_relax(); + udelay(10); + } while (!time_after_eq(jiffies, deadline)); + + bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg); + + return false; +} + bool bcma_core_is_enabled(struct bcma_device *core) { if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) @@ -25,13 +44,15 @@ void bcma_core_disable(struct bcma_device *core, u32 flags) if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) return; - bcma_awrite32(core, BCMA_IOCTL, flags); - bcma_aread32(core, BCMA_IOCTL); - udelay(10); + bcma_core_wait_value(core, BCMA_RESET_ST, ~0, 0, 300); bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET); bcma_aread32(core, BCMA_RESET_CTL); udelay(1); + + bcma_awrite32(core, BCMA_IOCTL, flags); + bcma_aread32(core, BCMA_IOCTL); + udelay(10); } EXPORT_SYMBOL_GPL(bcma_core_disable); @@ -43,6 +64,7 @@ int bcma_core_enable(struct bcma_device *core, u32 flags) bcma_aread32(core, BCMA_IOCTL); bcma_awrite32(core, BCMA_RESET_CTL, 0); + bcma_aread32(core, BCMA_RESET_CTL); udelay(1); bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags)); diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c index e6ed4fe5dced..4d07cce9c5d9 100644 --- a/drivers/bcma/driver_chipcommon_sflash.c +++ b/drivers/bcma/driver_chipcommon_sflash.c @@ -30,7 +30,7 @@ struct bcma_sflash_tbl_e { u16 numblocks; }; -static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = { +static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = { { "M25P20", 0x11, 0x10000, 4, }, { "M25P40", 0x12, 0x10000, 8, }, @@ -41,7 +41,7 @@ static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = { { 0 }, }; -static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = { +static const struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = { { "SST25WF512", 1, 0x1000, 16, }, { "SST25VF512", 0x48, 0x1000, 16, }, { "SST25WF010", 2, 0x1000, 32, }, @@ -59,7 +59,7 @@ static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = { { 0 }, }; -static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = { +static const struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = { { "AT45DB011", 0xc, 256, 512, }, { "AT45DB021", 0x14, 256, 1024, }, { "AT45DB041", 0x1c, 256, 2048, }, @@ -89,7 +89,7 @@ int bcma_sflash_init(struct bcma_drv_cc *cc) { struct bcma_bus *bus = cc->core->bus; struct bcma_sflash *sflash = &cc->sflash; - struct bcma_sflash_tbl_e *e; + const struct bcma_sflash_tbl_e *e; u32 id, id2; switch (cc->capabilities & BCMA_CC_CAP_FLASHT) { diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index 8934298a638d..de15b4f4b237 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c @@ -72,12 +72,12 @@ fail: * R/W ops. 
**************************************************/ -static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom) +static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom, + size_t words) { int i; - for (i = 0; i < SSB_SPROMSIZE_WORDS_R4; i++) - sprom[i] = bcma_read16(bus->drv_cc.core, - offset + (i * 2)); + for (i = 0; i < words; i++) + sprom[i] = bcma_read16(bus->drv_cc.core, offset + (i * 2)); } /************************************************** @@ -124,29 +124,29 @@ static inline u8 bcma_crc8(u8 crc, u8 data) return t[crc ^ data]; } -static u8 bcma_sprom_crc(const u16 *sprom) +static u8 bcma_sprom_crc(const u16 *sprom, size_t words) { int word; u8 crc = 0xFF; - for (word = 0; word < SSB_SPROMSIZE_WORDS_R4 - 1; word++) { + for (word = 0; word < words - 1; word++) { crc = bcma_crc8(crc, sprom[word] & 0x00FF); crc = bcma_crc8(crc, (sprom[word] & 0xFF00) >> 8); } - crc = bcma_crc8(crc, sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & 0x00FF); + crc = bcma_crc8(crc, sprom[words - 1] & 0x00FF); crc ^= 0xFF; return crc; } -static int bcma_sprom_check_crc(const u16 *sprom) +static int bcma_sprom_check_crc(const u16 *sprom, size_t words) { u8 crc; u8 expected_crc; u16 tmp; - crc = bcma_sprom_crc(sprom); - tmp = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_CRC; + crc = bcma_sprom_crc(sprom, words); + tmp = sprom[words - 1] & SSB_SPROM_REVISION_CRC; expected_crc = tmp >> SSB_SPROM_REVISION_CRC_SHIFT; if (crc != expected_crc) return -EPROTO; @@ -154,21 +154,25 @@ static int bcma_sprom_check_crc(const u16 *sprom) return 0; } -static int bcma_sprom_valid(const u16 *sprom) +static int bcma_sprom_valid(struct bcma_bus *bus, const u16 *sprom, + size_t words) { u16 revision; int err; - err = bcma_sprom_check_crc(sprom); + err = bcma_sprom_check_crc(sprom, words); if (err) return err; - revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_REV; - if (revision != 8 && revision != 9) { + revision = sprom[words - 1] & SSB_SPROM_REVISION_REV; + if (revision != 8 && revision != 9 && revision != 10) { pr_err("Unsupported SPROM revision: %d\n", revision); return -ENOENT; } + bus->sprom.revision = revision; + bcma_debug(bus, "Found SPROM revision %d\n", revision); + return 0; } @@ -208,9 +212,6 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom) BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) != ARRAY_SIZE(bus->sprom.core_pwr_info)); - bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & - SSB_SPROM_REVISION_REV; - for (i = 0; i < 3; i++) { v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i]; *(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v); @@ -502,7 +503,6 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus) case BCMA_CHIP_ID_BCM4331: present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT; break; - case BCMA_CHIP_ID_BCM43224: case BCMA_CHIP_ID_BCM43225: /* for these chips OTP is always available */ @@ -550,7 +550,9 @@ int bcma_sprom_get(struct bcma_bus *bus) { u16 offset = BCMA_CC_SPROM; u16 *sprom; - int err = 0; + size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4, + SSB_SPROMSIZE_WORDS_R10, }; + int i, err = 0; if (!bus->drv_cc.core) return -EOPNOTSUPP; @@ -579,32 +581,37 @@ int bcma_sprom_get(struct bcma_bus *bus) } } - sprom = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16), - GFP_KERNEL); - if (!sprom) - return -ENOMEM; - if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 || bus->chipinfo.id == BCMA_CHIP_ID_BCM43431) bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); bcma_debug(bus, "SPROM offset 0x%x\n", offset); - bcma_sprom_read(bus, 
offset, sprom); + for (i = 0; i < ARRAY_SIZE(sprom_sizes); i++) { + size_t words = sprom_sizes[i]; + + sprom = kcalloc(words, sizeof(u16), GFP_KERNEL); + if (!sprom) + return -ENOMEM; + + bcma_sprom_read(bus, offset, sprom, words); + err = bcma_sprom_valid(bus, sprom, words); + if (!err) + break; + + kfree(sprom); + } if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 || bus->chipinfo.id == BCMA_CHIP_ID_BCM43431) bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); - err = bcma_sprom_valid(sprom); if (err) { - bcma_warn(bus, "invalid sprom read from the PCIe card, try to use fallback sprom\n"); + bcma_warn(bus, "Invalid SPROM read from the PCIe card, trying to use fallback SPROM\n"); err = bcma_fill_sprom_with_fallback(bus, &bus->sprom); - goto out; + } else { + bcma_sprom_extract_r8(bus, sprom); + kfree(sprom); } - bcma_sprom_extract_r8(bus, sprom); - -out: - kfree(sprom); return err; } diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 13693b7a0d5c..75c262694632 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -554,6 +554,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC); if (skb == NULL) { BT_ERR("No free skb"); + ret = -ENOMEM; goto exit; } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 7a7e5f8ecadc..81f12757a842 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -57,6 +57,9 @@ static struct usb_device_id btusb_table[] = { /* Apple-specific (Broadcom) devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) }, + /* MediaTek MT76x0E */ + { USB_DEVICE(0x0e8d, 0x763f) }, + /* Broadcom SoftSailing reporting vendor specific */ { USB_DEVICE(0x0a5c, 0x21e1) }, diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 6961bbeab3ed..264f55099940 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1685,6 +1685,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, @@ -2341,7 +2342,7 @@ struct hid_device *hid_allocate_device(void) init_waitqueue_head(&hdev->debug_wait); INIT_LIST_HEAD(&hdev->debug_list); - mutex_init(&hdev->debug_list_lock); + spin_lock_init(&hdev->debug_list_lock); sema_init(&hdev->driver_lock, 1); sema_init(&hdev->driver_input_lock, 1); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 7e56cb3855e3..8453214ec376 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -579,15 +579,16 @@ void hid_debug_event(struct hid_device *hdev, char *buf) { int i; struct hid_debug_list *list; + unsigned long flags; - mutex_lock(&hdev->debug_list_lock); + spin_lock_irqsave(&hdev->debug_list_lock, flags); list_for_each_entry(list, &hdev->debug_list, node) { for (i = 0; i < strlen(buf); i++) list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = buf[i]; list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; } - mutex_unlock(&hdev->debug_list_lock); + 
spin_unlock_irqrestore(&hdev->debug_list_lock, flags); wake_up_interruptible(&hdev->debug_wait); } @@ -977,6 +978,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) { int err = 0; struct hid_debug_list *list; + unsigned long flags; if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) { err = -ENOMEM; @@ -992,9 +994,9 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) file->private_data = list; mutex_init(&list->read_mutex); - mutex_lock(&list->hdev->debug_list_lock); + spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_add_tail(&list->node, &list->hdev->debug_list); - mutex_unlock(&list->hdev->debug_list_lock); + spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); out: return err; @@ -1088,10 +1090,11 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait) static int hid_debug_events_release(struct inode *inode, struct file *file) { struct hid_debug_list *list = file->private_data; + unsigned long flags; - mutex_lock(&list->hdev->debug_list_lock); + spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_del(&list->node); - mutex_unlock(&list->hdev->debug_list_lock); + spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); kfree(list->hid_debug_buf); kfree(list); diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c index 9b0efb0083fe..d16491192112 100644 --- a/drivers/hid/hid-steelseries.c +++ b/drivers/hid/hid-steelseries.c @@ -18,7 +18,8 @@ #include "hid-ids.h" -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ + (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) #define SRWS1_NUMBER_LEDS 15 struct steelseries_srws1_data { __u16 led_state; @@ -107,7 +108,8 @@ static __u8 steelseries_srws1_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ + (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds) { struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list; @@ -370,7 +372,8 @@ MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices); static struct hid_driver steelseries_srws1_driver = { .name = "steelseries_srws1", .id_table = steelseries_srws1_devices, -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ + (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) .probe = steelseries_srws1_probe, .remove = steelseries_srws1_remove, #endif diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 0e8fab1913df..fa6964d8681a 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -273,6 +273,27 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = { .target_residency = 500, .enter = &intel_idle }, { + .name = "C8-HSW", + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, + .target_residency = 900, + .enter = &intel_idle }, + { + .name = "C9-HSW", + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, + .target_residency = 1800, + .enter = &intel_idle }, + { + .name = "C10-HSW", + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, + 
.target_residency = 7700, + .enter = &intel_idle }, + { .enter = NULL } }; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index c6083132c4b8..0387e05cdb98 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -319,6 +319,9 @@ static void __cache_size_refresh(void) static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, enum data_mode *data_mode) { + unsigned noio_flag; + void *ptr; + if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { *data_mode = DATA_MODE_SLAB; return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); @@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, } *data_mode = DATA_MODE_VMALLOC; - return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); + + /* + * __vmalloc allocates the data pages and auxiliary structures with + * gfp_flags that were specified, but pagetables are always allocated + * with GFP_KERNEL, no matter what was specified as gfp_mask. + * + * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that + * all allocations done by this process (including pagetables) are done + * as if GFP_NOIO was specified. + */ + + if (gfp_mask & __GFP_NORETRY) + noio_flag = memalloc_noio_save(); + + ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); + + if (gfp_mask & __GFP_NORETRY) + memalloc_noio_restore(noio_flag); + + return ptr; } /* diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 83e995fece88..1af7255bbffb 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd, struct dm_cache_statistics *stats) { down_read(&cmd->root_lock); - memcpy(stats, &cmd->stats, sizeof(*stats)); + *stats = cmd->stats; up_read(&cmd->root_lock); } @@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd, struct dm_cache_statistics *stats) { down_write(&cmd->root_lock); - memcpy(&cmd->stats, stats, sizeof(*stats)); + cmd->stats = *stats; up_write(&cmd->root_lock); } diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index 558bdfdabf5f..33369ca9614f 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h @@ -130,8 +130,8 @@ struct dm_cache_policy { * * Must not block. * - * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK - * would be typical). + * Returns 0 if in cache, -ENOENT if not, < 0 for other errors + * (-EWOULDBLOCK would be typical). */ int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 10744091e6ca..df44b60e66f2 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -205,7 +205,7 @@ struct per_bio_data { /* * writethrough fields. These MUST remain at the end of this * structure and the 'cache' member must be the first as it - * is used to determine the offsetof the writethrough fields. + * is used to determine the offset of the writethrough fields. 
*/ struct cache *cache; dm_cblock_t cblock; @@ -393,7 +393,7 @@ static int get_cell(struct cache *cache, return r; } - /*----------------------------------------------------------------*/ +/*----------------------------------------------------------------*/ static bool is_dirty(struct cache *cache, dm_cblock_t b) { @@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl } /*----------------------------------------------------------------*/ + static bool block_size_is_power_of_two(struct cache *cache) { return cache->sectors_per_block_shift >= 0; @@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err) /* * We can't issue this bio directly, since we're in interrupt - * context. So it get's put on a bio list for processing by the + * context. So it gets put on a bio list for processing by the * worker thread. */ defer_writethrough_bio(pb->cache, bio); @@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws) static void do_waker(struct work_struct *ws) { struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); + policy_tick(cache->policy); wake_worker(cache); queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); } @@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv, static struct kmem_cache *migration_cache; -static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv) +#define NOT_CORE_OPTION 1 + +static int process_config_option(struct cache *cache, const char *key, const char *value) +{ + unsigned long tmp; + + if (!strcasecmp(key, "migration_threshold")) { + if (kstrtoul(value, 10, &tmp)) + return -EINVAL; + + cache->migration_threshold = tmp; + return 0; + } + + return NOT_CORE_OPTION; +} + +static int set_config_value(struct cache *cache, const char *key, const char *value) +{ + int r = process_config_option(cache, key, value); + + if (r == NOT_CORE_OPTION) + r = policy_set_config_value(cache->policy, key, value); + + if (r) + DMWARN("bad config value for %s: %s", key, value); + + return r; +} + +static int set_config_values(struct cache *cache, int argc, const char **argv) { int r = 0; @@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a } while (argc) { - r = policy_set_config_value(p, argv[0], argv[1]); - if (r) { - DMWARN("policy_set_config_value failed: key = '%s', value = '%s'", - argv[0], argv[1]); - return r; - } + r = set_config_value(cache, argv[0], argv[1]); + if (r) + break; argc -= 2; argv += 2; @@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a static int create_cache_policy(struct cache *cache, struct cache_args *ca, char **error) { - int r; - cache->policy = dm_cache_policy_create(ca->policy_name, cache->cache_size, cache->origin_sectors, @@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, return -ENOMEM; } - r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); - if (r) { - *error = "Error setting cache policy's config values"; - dm_cache_policy_destroy(cache->policy); - cache->policy = NULL; - } - - return r; + return 0; } /* @@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size, return discard_block_size; } -#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) +#define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) { @@ -1911,7 +1931,7 @@ static 
int cache_create(struct cache_args *ca, struct cache **result) ti->discards_supported = true; ti->discard_zeroes_data_unsupported = true; - memcpy(&cache->features, &ca->features, sizeof(cache->features)); + cache->features = ca->features; ti->per_bio_data_size = get_per_bio_data_size(cache); cache->callbacks.congested_fn = cache_is_congested; @@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result) r = create_cache_policy(cache, ca, error); if (r) goto bad; + cache->policy_nr_args = ca->policy_argc; + cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; + + r = set_config_values(cache, ca->policy_argc, ca->policy_argv); + if (r) { + *error = "Error setting cache policy's config values"; + goto bad; + } cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, ca->block_size, may_format, @@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result) INIT_LIST_HEAD(&cache->quiesced_migrations); INIT_LIST_HEAD(&cache->completed_migrations); INIT_LIST_HEAD(&cache->need_commit_migrations); - cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; atomic_set(&cache->nr_migrations, 0); init_waitqueue_head(&cache->migration_wait); + r = -ENOMEM; cache->nr_dirty = 0; cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); if (!cache->dirty_bitset) { @@ -2517,23 +2545,6 @@ err: DMEMIT("Error"); } -#define NOT_CORE_OPTION 1 - -static int process_config_option(struct cache *cache, char **argv) -{ - unsigned long tmp; - - if (!strcasecmp(argv[0], "migration_threshold")) { - if (kstrtoul(argv[1], 10, &tmp)) - return -EINVAL; - - cache->migration_threshold = tmp; - return 0; - } - - return NOT_CORE_OPTION; -} - /* * Supports <key> <value>. * @@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv) */ static int cache_message(struct dm_target *ti, unsigned argc, char **argv) { - int r; struct cache *cache = ti->private; if (argc != 2) return -EINVAL; - r = process_config_option(cache, argv); - if (r == NOT_CORE_OPTION) - return policy_set_config_value(cache->policy, argv[0], argv[1]); - - return r; + return set_config_value(cache, argv[0], argv[1]); } static int cache_iterate_devices(struct dm_target *ti, @@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 1, 0}, + .version = {1, 1, 1}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 51bb81676be3..bdf26f5bd326 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, ti->num_flush_bios = 1; ti->num_discard_bios = 1; + ti->num_write_same_bios = 1; return 0; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c0e07026a8d1..c434e5aab2df 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); if (!s->pending_pool) { ti->error = "Could not allocate mempool for pending exceptions"; + r = -ENOMEM; goto bad_pending_pool; } diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index ea5e878a30b9..d907ca6227ce 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc, static int 
stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct stripe_c *sc; - sector_t width; + sector_t width, tmp_len; uint32_t stripes; uint32_t chunk_size; int r; @@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) } width = ti->len; - if (sector_div(width, chunk_size)) { + if (sector_div(width, stripes)) { ti->error = "Target length not divisible by " - "chunk size"; + "number of stripes"; return -EINVAL; } - if (sector_div(width, stripes)) { + tmp_len = width; + if (sector_div(tmp_len, chunk_size)) { ti->error = "Target length not divisible by " - "number of stripes"; + "chunk size"; return -EINVAL; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e50dad0c65f4..1ff252ab7d46 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t) return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) + ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) return false; } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 00cee02f8fc9..60bce435f4fa 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td, return r; } -static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) +static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count) { int r; dm_block_t old_count; - r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count); + r = dm_sm_get_nr_blocks(sm, &old_count); if (r) return r; @@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) return 0; if (new_count < old_count) { - DMERR("cannot reduce size of data device"); + DMERR("cannot reduce size of space map"); return -EINVAL; } - return dm_sm_extend(pmd->data_sm, new_count - old_count); + return dm_sm_extend(sm, new_count - old_count); } int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) @@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) down_write(&pmd->root_lock); if (!pmd->fail_io) - r = __resize_data_dev(pmd, new_count); + r = __resize_space_map(pmd->data_sm, new_count); + up_write(&pmd->root_lock); + + return r; +} + +int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) +{ + int r = -EINVAL; + + down_write(&pmd->root_lock); + if (!pmd->fail_io) + r = __resize_space_map(pmd->metadata_sm, new_count); up_write(&pmd->root_lock); return r; @@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd) dm_bm_set_read_only(pmd->bm); up_write(&pmd->root_lock); } + +int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + int r; + + down_write(&pmd->root_lock); + r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context); + up_write(&pmd->root_lock); + + return r; +} diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 0cecc3702885..845ebbe589a9 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -8,6 +8,7 @@ #define DM_THIN_METADATA_H #include "persistent-data/dm-block-manager.h" +#include "persistent-data/dm-space-map.h" #define THIN_METADATA_BLOCK_SIZE 4096 @@ -185,6 +186,7 @@ int 
dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); * blocks would be lost. */ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); +int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); /* * Flicks the underlying block manager into read only mode, so you know @@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); */ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); +int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context); + /*----------------------------------------------------------------*/ #endif diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 004ad1652b73..759cffc45cab 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) return r; if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { - DMWARN("%s: reached low water mark, sending event.", + DMWARN("%s: reached low water mark for data device: sending event.", dm_device_name(pool->pool_md)); spin_lock_irqsave(&pool->lock, flags); pool->low_water_triggered = 1; @@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio) bio_io_error(bio); } +/* + * FIXME: should we also commit due to size of transaction, measured in + * metadata blocks? + */ static int need_commit_due_to_time(struct pool *pool) { return jiffies < pool->last_commit_jiffies || @@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, return r; } +static void metadata_low_callback(void *context) +{ + struct pool *pool = context; + + DMWARN("%s: reached low water mark for metadata device: sending event.", + dm_device_name(pool->pool_md)); + + dm_table_event(pool->ti->table); +} + +static sector_t get_metadata_dev_size(struct block_device *bdev) +{ + sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; + char buffer[BDEVNAME_SIZE]; + + if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) { + DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", + bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS); + metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING; + } + + return metadata_dev_size; +} + +static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev) +{ + sector_t metadata_dev_size = get_metadata_dev_size(bdev); + + sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); + + return metadata_dev_size; +} + +/* + * When a metadata threshold is crossed a dm event is triggered, and + * userland should respond by growing the metadata device. We could let + * userland set the threshold, like we do with the data threshold, but I'm + * not sure they know enough to do this well. + */ +static dm_block_t calc_metadata_threshold(struct pool_c *pt) +{ + /* + * 4M is ample for all ops with the possible exception of thin + * device deletion which is harmless if it fails (just retry the + * delete after you've grown the device). 
+ */ + dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4; + return min((dm_block_t)1024ULL /* 4M */, quarter); +} + /* * thin-pool <metadata dev> <data dev> * <data block size (sectors)> @@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) unsigned long block_size; dm_block_t low_water_blocks; struct dm_dev *metadata_dev; - sector_t metadata_dev_size; - char b[BDEVNAME_SIZE]; + fmode_t metadata_mode; /* * FIXME Remove validation from scope of lock. @@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) r = -EINVAL; goto out_unlock; } + as.argc = argc; as.argv = argv; - r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev); + /* + * Set default pool features. + */ + pool_features_init(&pf); + + dm_consume_args(&as, 4); + r = parse_pool_features(&as, &pf, ti); + if (r) + goto out_unlock; + + metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE); + r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev); if (r) { ti->error = "Error opening metadata block device"; goto out_unlock; } - metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT; - if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) - DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", - bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS); + /* + * Run for the side-effect of possibly issuing a warning if the + * device is too big. + */ + (void) get_metadata_dev_size(metadata_dev->bdev); r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev); if (r) { @@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) goto out; } - /* - * Set default pool features. - */ - pool_features_init(&pf); - - dm_consume_args(&as, 4); - r = parse_pool_features(&as, &pf, ti); - if (r) - goto out; - pt = kzalloc(sizeof(*pt), GFP_KERNEL); if (!pt) { r = -ENOMEM; @@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) } ti->private = pt; + r = dm_pool_register_metadata_threshold(pt->pool->pmd, + calc_metadata_threshold(pt), + metadata_low_callback, + pool); + if (r) + goto out_free_pt; + pt->callbacks.congested_fn = pool_is_congested; dm_table_add_target_callbacks(ti->table, &pt->callbacks); @@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio) return r; } -/* - * Retrieves the number of blocks of the data device from - * the superblock and compares it to the actual device size, - * thus resizing the data device in case it has grown. - * - * This both copes with opening preallocated data devices in the ctr - * being followed by a resume - * -and- - * calling the resume method individually after userspace has - * grown the data device in reaction to a table event. - */ -static int pool_preresume(struct dm_target *ti) +static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit) { int r; struct pool_c *pt = ti->private; @@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti) sector_t data_size = ti->len; dm_block_t sb_data_size; - /* - * Take control of the pool object. 
- */ - r = bind_control_target(pool, ti); - if (r) - return r; + *need_commit = false; (void) sector_div(data_size, pool->sectors_per_block); @@ -2114,7 +2161,7 @@ } if (data_size < sb_data_size) { - DMERR("pool target too small, is %llu blocks (expected %llu)", + DMERR("pool target (%llu blocks) too small: expected %llu", (unsigned long long)data_size, sb_data_size); return -EINVAL; @@ -2122,17 +2169,90 @@ r = dm_pool_resize_data_dev(pool->pmd, data_size); if (r) { DMERR("failed to resize data device"); - /* FIXME Stricter than necessary: Rollback transaction instead here */ set_pool_mode(pool, PM_READ_ONLY); return r; } - (void) commit_or_fallback(pool); + *need_commit = true; } return 0; } +static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) +{ + int r; + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + dm_block_t metadata_dev_size, sb_metadata_dev_size; + + *need_commit = false; + + metadata_dev_size = get_metadata_dev_size(pool->md_dev); + + r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); + if (r) { + DMERR("failed to retrieve metadata device size"); + return r; + } + + if (metadata_dev_size < sb_metadata_dev_size) { + DMERR("metadata device (%llu sectors) too small: expected %llu", + metadata_dev_size, sb_metadata_dev_size); + return -EINVAL; + + } else if (metadata_dev_size > sb_metadata_dev_size) { + r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); + if (r) { + DMERR("failed to resize metadata device"); + return r; + } + + *need_commit = true; + } + + return 0; +} + +/* + * Retrieves the number of blocks of the data device from + * the superblock and compares it to the actual device size, + * thus resizing the data device in case it has grown. + * + * This both copes with opening preallocated data devices in the ctr + * being followed by a resume + * -and- + * calling the resume method individually after userspace has + * grown the data device in reaction to a table event. + */ +static int pool_preresume(struct dm_target *ti) +{ + int r; + bool need_commit1, need_commit2; + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + + /* + * Take control of the pool object.
+ */ + r = bind_control_target(pool, ti); + if (r) + return r; + + r = maybe_resize_data_dev(ti, &need_commit1); + if (r) + return r; + + r = maybe_resize_metadata_dev(ti, &need_commit2); + if (r) + return r; + + if (need_commit1 || need_commit2) + (void) commit_or_fallback(pool); + + return 0; +} + static void pool_resume(struct dm_target *ti) { struct pool_c *pt = ti->private; @@ -2549,7 +2669,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 7, 0}, + .version = {1, 8, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index f6d29e614ab7..e735a6d5a793 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -248,7 +248,8 @@ static struct dm_space_map ops = { .new_block = sm_disk_new_block, .commit = sm_disk_commit, .root_size = sm_disk_root_size, - .copy_root = sm_disk_copy_root + .copy_root = sm_disk_copy_root, + .register_threshold_callback = NULL }; struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm, diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 906cf3df71af..1c959684caef 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -17,6 +17,55 @@ /*----------------------------------------------------------------*/ /* + * An edge triggered threshold. + */ +struct threshold { + bool threshold_set; + bool value_set; + dm_block_t threshold; + dm_block_t current_value; + dm_sm_threshold_fn fn; + void *context; +}; + +static void threshold_init(struct threshold *t) +{ + t->threshold_set = false; + t->value_set = false; +} + +static void set_threshold(struct threshold *t, dm_block_t value, + dm_sm_threshold_fn fn, void *context) +{ + t->threshold_set = true; + t->threshold = value; + t->fn = fn; + t->context = context; +} + +static bool below_threshold(struct threshold *t, dm_block_t value) +{ + return t->threshold_set && value <= t->threshold; +} + +static bool threshold_already_triggered(struct threshold *t) +{ + return t->value_set && below_threshold(t, t->current_value); +} + +static void check_threshold(struct threshold *t, dm_block_t value) +{ + if (below_threshold(t, value) && + !threshold_already_triggered(t)) + t->fn(t->context); + + t->value_set = true; + t->current_value = value; +} + +/*----------------------------------------------------------------*/ + +/* * Space map interface. 
* * The low level disk format is written using the standard btree and @@ -54,6 +103,8 @@ struct sm_metadata { unsigned allocated_this_transaction; unsigned nr_uncommitted; struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; + + struct threshold threshold; }; static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) @@ -144,12 +195,6 @@ static void sm_metadata_destroy(struct dm_space_map *sm) kfree(smm); } -static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) -{ - DMERR("doesn't support extend"); - return -EINVAL; -} - static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count) { struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); @@ -335,9 +380,19 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b) static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b) { + dm_block_t count; + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + int r = sm_metadata_new_block_(sm, b); if (r) DMERR("unable to allocate new metadata block"); + + r = sm_metadata_get_nr_free(sm, &count); + if (r) + DMERR("couldn't get free block count"); + + check_threshold(&smm->threshold, count); + return r; } @@ -357,6 +412,18 @@ static int sm_metadata_commit(struct dm_space_map *sm) return 0; } +static int sm_metadata_register_threshold_callback(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + + set_threshold(&smm->threshold, threshold, fn, context); + + return 0; +} + static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result) { *result = sizeof(struct disk_sm_root); @@ -382,6 +449,8 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t return 0; } +static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks); + static struct dm_space_map ops = { .destroy = sm_metadata_destroy, .extend = sm_metadata_extend, @@ -395,7 +464,8 @@ static struct dm_space_map ops = { .new_block = sm_metadata_new_block, .commit = sm_metadata_commit, .root_size = sm_metadata_root_size, - .copy_root = sm_metadata_copy_root + .copy_root = sm_metadata_copy_root, + .register_threshold_callback = sm_metadata_register_threshold_callback }; /*----------------------------------------------------------------*/ @@ -410,7 +480,7 @@ static void sm_bootstrap_destroy(struct dm_space_map *sm) static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks) { - DMERR("boostrap doesn't support extend"); + DMERR("bootstrap doesn't support extend"); return -EINVAL; } @@ -450,7 +520,7 @@ static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm, static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b, uint32_t count) { - DMERR("boostrap doesn't support set_count"); + DMERR("bootstrap doesn't support set_count"); return -EINVAL; } @@ -491,7 +561,7 @@ static int sm_bootstrap_commit(struct dm_space_map *sm) static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result) { - DMERR("boostrap doesn't support root_size"); + DMERR("bootstrap doesn't support root_size"); return -EINVAL; } @@ -499,7 +569,7 @@ static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result) static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where, size_t max) { - DMERR("boostrap doesn't support copy_root"); + DMERR("bootstrap doesn't support copy_root"); return -EINVAL; } @@ -517,11 +587,42 
@@ static struct dm_space_map bootstrap_ops = { .new_block = sm_bootstrap_new_block, .commit = sm_bootstrap_commit, .root_size = sm_bootstrap_root_size, - .copy_root = sm_bootstrap_copy_root + .copy_root = sm_bootstrap_copy_root, + .register_threshold_callback = NULL }; /*----------------------------------------------------------------*/ +static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) +{ + int r, i; + enum allocation_event ev; + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + dm_block_t old_len = smm->ll.nr_blocks; + + /* + * Flick into a mode where all blocks get allocated in the new area. + */ + smm->begin = old_len; + memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); + + /* + * Extend. + */ + r = sm_ll_extend(&smm->ll, extra_blocks); + + /* + * Switch back to normal behaviour. + */ + memcpy(&smm->sm, &ops, sizeof(smm->sm)); + for (i = old_len; !r && i < smm->begin; i++) + r = sm_ll_inc(&smm->ll, i, &ev); + + return r; +} + +/*----------------------------------------------------------------*/ + struct dm_space_map *dm_sm_metadata_init(void) { struct sm_metadata *smm; @@ -549,6 +650,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm, smm->recursion_count = 0; smm->allocated_this_transaction = 0; smm->nr_uncommitted = 0; + threshold_init(&smm->threshold); memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); @@ -590,6 +692,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm, smm->recursion_count = 0; smm->allocated_this_transaction = 0; smm->nr_uncommitted = 0; + threshold_init(&smm->threshold); memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); return 0; diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h index 1cbfc6b1638a..3e6d1153b7c4 100644 --- a/drivers/md/persistent-data/dm-space-map.h +++ b/drivers/md/persistent-data/dm-space-map.h @@ -9,6 +9,8 @@ #include "dm-block-manager.h" +typedef void (*dm_sm_threshold_fn)(void *context); + /* * struct dm_space_map keeps a record of how many times each block in a device * is referenced. It needs to be fixed on disk as part of the transaction. @@ -59,6 +61,15 @@ struct dm_space_map { */ int (*root_size)(struct dm_space_map *sm, size_t *result); int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len); + + /* + * You can register one threshold callback which is edge-triggered + * when the free space in the space map drops below the threshold. 
+ */ + int (*register_threshold_callback)(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context); }; /*----------------------------------------------------------------*/ @@ -131,4 +142,16 @@ static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le return sm->copy_root(sm, copy_to_here_le, len); } +static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + if (sm->register_threshold_callback) + return sm->register_threshold_callback(sm, threshold, fn, context); + + return -EINVAL; +} + + #endif /* _LINUX_DM_SPACE_MAP_H */ diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index f8f0156dff4e..200020eb3005 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -280,5 +280,6 @@ source "drivers/net/wireless/rtlwifi/Kconfig" source "drivers/net/wireless/ti/Kconfig" source "drivers/net/wireless/zd1211rw/Kconfig" source "drivers/net/wireless/mwifiex/Kconfig" +source "drivers/net/wireless/cw1200/Kconfig" endif # WLAN diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 67156efe14c4..0fab227025be 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -57,3 +57,5 @@ obj-$(CONFIG_MWIFIEX) += mwifiex/ obj-$(CONFIG_BRCMFMAC) += brcm80211/ obj-$(CONFIG_BRCMSMAC) += brcm80211/ + +obj-$(CONFIG_CW1200) += cw1200/ diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig index 2c02b4e84094..1abf1d421173 100644 --- a/drivers/net/wireless/ath/Kconfig +++ b/drivers/net/wireless/ath/Kconfig @@ -31,5 +31,6 @@ source "drivers/net/wireless/ath/carl9170/Kconfig" source "drivers/net/wireless/ath/ath6kl/Kconfig" source "drivers/net/wireless/ath/ar5523/Kconfig" source "drivers/net/wireless/ath/wil6210/Kconfig" +source "drivers/net/wireless/ath/ath10k/Kconfig" endif diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile index 97b964ded2be..fb05cfd19361 100644 --- a/drivers/net/wireless/ath/Makefile +++ b/drivers/net/wireless/ath/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_CARL9170) += carl9170/ obj-$(CONFIG_ATH6KL) += ath6kl/ obj-$(CONFIG_AR5523) += ar5523/ obj-$(CONFIG_WIL6210) += wil6210/ +obj-$(CONFIG_ATH10K) += ath10k/ obj-$(CONFIG_ATH_COMMON) += ath.o diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig new file mode 100644 index 000000000000..cde58fe96254 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -0,0 +1,39 @@ +config ATH10K + tristate "Atheros 802.11ac wireless cards support" + depends on MAC80211 + select ATH_COMMON + ---help--- + This module adds support for wireless adapters based on + Atheros IEEE 802.11ac family of chipsets. + + If you choose to build a module, it'll be called ath10k. + +config ATH10K_PCI + tristate "Atheros ath10k PCI support" + depends on ATH10K && PCI + ---help--- + This module adds support for the PCIe bus + +config ATH10K_DEBUG + bool "Atheros ath10k debugging" + depends on ATH10K + ---help--- + Enables debug support + + If unsure, say Y to make it easier to debug problems. + +config ATH10K_DEBUGFS + bool "Atheros ath10k debugfs support" + depends on ATH10K + ---help--- + Enables debugfs support + + If unsure, say Y to make it easier to debug problems. + +config ATH10K_TRACING + bool "Atheros ath10k tracing support" + depends on ATH10K + depends on EVENT_TRACING + ---help--- + Select this to let ath10k use the tracing infrastructure.
+ diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile new file mode 100644 index 000000000000..a4179f49ee1f --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -0,0 +1,20 @@ +obj-$(CONFIG_ATH10K) += ath10k_core.o +ath10k_core-y += mac.o \ + debug.o \ + core.o \ + htc.o \ + htt.o \ + htt_rx.o \ + htt_tx.o \ + txrx.o \ + wmi.o \ + bmi.o + +ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o + +obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o +ath10k_pci-y += pci.o \ + ce.o + +# for tracing framework to find trace.h +CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c new file mode 100644 index 000000000000..1a2ef51b69d9 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "bmi.h" +#include "hif.h" +#include "debug.h" +#include "htc.h" + +int ath10k_bmi_done(struct ath10k *ar) +{ + struct bmi_cmd cmd; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done); + int ret; + + if (ar->bmi.done_sent) { + ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__); + return 0; + } + + ar->bmi.done_sent = true; + cmd.id = __cpu_to_le32(BMI_DONE); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); + if (ret) { + ath10k_warn("unable to write to the device: %d\n", ret); + return ret; + } + + ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n"); + return 0; +} + +int ath10k_bmi_get_target_info(struct ath10k *ar, + struct bmi_target_info *target_info) +{ + struct bmi_cmd cmd; + union bmi_resp resp; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info); + u32 resplen = sizeof(resp.get_target_info); + int ret; + + if (ar->bmi.done_sent) { + ath10k_warn("BMI Get Target Info Command disallowed\n"); + return -EBUSY; + } + + cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); + if (ret) { + ath10k_warn("unable to get target info from device\n"); + return ret; + } + + if (resplen < sizeof(resp.get_target_info)) { + ath10k_warn("invalid get_target_info response length (%d)\n", + resplen); + return -EIO; + } + + target_info->version = __le32_to_cpu(resp.get_target_info.version); + target_info->type = __le32_to_cpu(resp.get_target_info.type); + return 0; +} + +int ath10k_bmi_read_memory(struct ath10k *ar, + u32 address, void *buffer, u32 length) +{ + struct bmi_cmd cmd; + union bmi_resp resp; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem); + u32 rxlen; + int ret; + + if (ar->bmi.done_sent) { + ath10k_warn("command disallowed\n"); + return -EBUSY; + } + + ath10k_dbg(ATH10K_DBG_CORE, + "%s: (device: 0x%p, address: 0x%x, length: %d)\n", + __func__, ar, 
address, length); + + while (length) { + rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE); + + cmd.id = __cpu_to_le32(BMI_READ_MEMORY); + cmd.read_mem.addr = __cpu_to_le32(address); + cmd.read_mem.len = __cpu_to_le32(rxlen); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, + &resp, &rxlen); + if (ret) { + ath10k_warn("unable to read from the device\n"); + return ret; + } + + memcpy(buffer, resp.read_mem.payload, rxlen); + address += rxlen; + buffer += rxlen; + length -= rxlen; + } + + return 0; +} + +int ath10k_bmi_write_memory(struct ath10k *ar, + u32 address, const void *buffer, u32 length) +{ + struct bmi_cmd cmd; + u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem); + u32 txlen; + int ret; + + if (ar->bmi.done_sent) { + ath10k_warn("command disallowed\n"); + return -EBUSY; + } + + ath10k_dbg(ATH10K_DBG_CORE, + "%s: (device: 0x%p, address: 0x%x, length: %d)\n", + __func__, ar, address, length); + + while (length) { + txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen); + + /* copy before roundup to avoid reading beyond buffer*/ + memcpy(cmd.write_mem.payload, buffer, txlen); + txlen = roundup(txlen, 4); + + cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY); + cmd.write_mem.addr = __cpu_to_le32(address); + cmd.write_mem.len = __cpu_to_le32(txlen); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, + NULL, NULL); + if (ret) { + ath10k_warn("unable to write to the device\n"); + return ret; + } + + /* fixup roundup() so `length` zeroes out for last chunk */ + txlen = min(txlen, length); + + address += txlen; + buffer += txlen; + length -= txlen; + } + + return 0; +} + +int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param) +{ + struct bmi_cmd cmd; + union bmi_resp resp; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute); + u32 resplen = sizeof(resp.execute); + int ret; + + if (ar->bmi.done_sent) { + ath10k_warn("command disallowed\n"); + return -EBUSY; + } + + ath10k_dbg(ATH10K_DBG_CORE, + "%s: (device: 0x%p, address: 0x%x, param: %d)\n", + __func__, ar, address, *param); + + cmd.id = __cpu_to_le32(BMI_EXECUTE); + cmd.execute.addr = __cpu_to_le32(address); + cmd.execute.param = __cpu_to_le32(*param); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); + if (ret) { + ath10k_warn("unable to read from the device\n"); + return ret; + } + + if (resplen < sizeof(resp.execute)) { + ath10k_warn("invalid execute response length (%d)\n", + resplen); + return ret; + } + + *param = __le32_to_cpu(resp.execute.result); + return 0; +} + +int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length) +{ + struct bmi_cmd cmd; + u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data); + u32 txlen; + int ret; + + if (ar->bmi.done_sent) { + ath10k_warn("command disallowed\n"); + return -EBUSY; + } + + while (length) { + txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen); + + WARN_ON_ONCE(txlen & 3); + + cmd.id = __cpu_to_le32(BMI_LZ_DATA); + cmd.lz_data.len = __cpu_to_le32(txlen); + memcpy(cmd.lz_data.payload, buffer, txlen); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, + NULL, NULL); + if (ret) { + ath10k_warn("unable to write to the device\n"); + return ret; + } + + buffer += txlen; + length -= txlen; + } + + return 0; +} + +int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address) +{ + struct bmi_cmd cmd; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start); + int ret; + + if (ar->bmi.done_sent) { + ath10k_warn("command disallowed\n"); + return -EBUSY; + } + + cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START); + cmd.lz_start.addr = 
__cpu_to_le32(address); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); + if (ret) { + ath10k_warn("unable to Start LZ Stream to the device\n"); + return ret; + } + + return 0; +} + +int ath10k_bmi_fast_download(struct ath10k *ar, + u32 address, const void *buffer, u32 length) +{ + u8 trailer[4] = {}; + u32 head_len = rounddown(length, 4); + u32 trailer_len = length - head_len; + int ret; + + ret = ath10k_bmi_lz_stream_start(ar, address); + if (ret) + return ret; + + /* copy the last word into a zero padded buffer */ + if (trailer_len > 0) + memcpy(trailer, buffer + head_len, trailer_len); + + ret = ath10k_bmi_lz_data(ar, buffer, head_len); + if (ret) + return ret; + + if (trailer_len > 0) + ret = ath10k_bmi_lz_data(ar, trailer, 4); + + if (ret != 0) + return ret; + + /* + * Close compressed stream and open a new (fake) one. + * This serves mainly to flush Target caches. + */ + ret = ath10k_bmi_lz_stream_start(ar, 0x00); + + return ret; +} diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h new file mode 100644 index 000000000000..32c56aa33a5e --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _BMI_H_ +#define _BMI_H_ + +#include "core.h" + +/* + * Bootloader Messaging Interface (BMI) + * + * BMI is a very simple messaging interface used during initialization + * to read memory, write memory, execute code, and to define an + * application entry PC. + * + * It is used to download an application to QCA988x, to provide + * patches to code that is already resident on QCA988x, and generally + * to examine and modify state. The Host has an opportunity to use + * BMI only once during bootup. Once the Host issues a BMI_DONE + * command, this opportunity ends. + * + * The Host writes BMI requests to mailbox0, and reads BMI responses + * from mailbox0. BMI requests all begin with a command + * (see below for specific commands), and are followed by + * command-specific data. + * + * Flow control: + * The Host can only issue a command once the Target gives it a + * "BMI Command Credit", using AR8K Counter #4. As soon as the + * Target has completed a command, it issues another BMI Command + * Credit (so the Host can issue the next command). + * + * BMI handles all required Target-side cache flushing. 
+ */ + +/* Maximum data size used for BMI transfers */ +#define BMI_MAX_DATA_SIZE 256 + +/* len = cmd + addr + length */ +#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \ + sizeof(u32) + \ + sizeof(u32) + \ + sizeof(u32)) + +/* BMI Commands */ + +enum bmi_cmd_id { + BMI_NO_COMMAND = 0, + BMI_DONE = 1, + BMI_READ_MEMORY = 2, + BMI_WRITE_MEMORY = 3, + BMI_EXECUTE = 4, + BMI_SET_APP_START = 5, + BMI_READ_SOC_REGISTER = 6, + BMI_READ_SOC_WORD = 6, + BMI_WRITE_SOC_REGISTER = 7, + BMI_WRITE_SOC_WORD = 7, + BMI_GET_TARGET_ID = 8, + BMI_GET_TARGET_INFO = 8, + BMI_ROMPATCH_INSTALL = 9, + BMI_ROMPATCH_UNINSTALL = 10, + BMI_ROMPATCH_ACTIVATE = 11, + BMI_ROMPATCH_DEACTIVATE = 12, + BMI_LZ_STREAM_START = 13, /* should be followed by LZ_DATA */ + BMI_LZ_DATA = 14, + BMI_NVRAM_PROCESS = 15, +}; + +#define BMI_NVRAM_SEG_NAME_SZ 16 + +struct bmi_cmd { + __le32 id; /* enum bmi_cmd_id */ + union { + struct { + } done; + struct { + __le32 addr; + __le32 len; + } read_mem; + struct { + __le32 addr; + __le32 len; + u8 payload[0]; + } write_mem; + struct { + __le32 addr; + __le32 param; + } execute; + struct { + __le32 addr; + } set_app_start; + struct { + __le32 addr; + } read_soc_reg; + struct { + __le32 addr; + __le32 value; + } write_soc_reg; + struct { + } get_target_info; + struct { + __le32 rom_addr; + __le32 ram_addr; /* or value */ + __le32 size; + __le32 activate; /* 0=install, but don't activate */ + } rompatch_install; + struct { + __le32 patch_id; + } rompatch_uninstall; + struct { + __le32 count; + __le32 patch_ids[0]; /* length of @count */ + } rompatch_activate; + struct { + __le32 count; + __le32 patch_ids[0]; /* length of @count */ + } rompatch_deactivate; + struct { + __le32 addr; + } lz_start; + struct { + __le32 len; /* max BMI_MAX_DATA_SIZE */ + u8 payload[0]; /* length of @len */ + } lz_data; + struct { + u8 name[BMI_NVRAM_SEG_NAME_SZ]; + } nvram_process; + u8 payload[BMI_MAX_CMDBUF_SIZE]; + }; +} __packed; + +union bmi_resp { + struct { + u8 payload[0]; + } read_mem; + struct { + __le32 result; + } execute; + struct { + __le32 value; + } read_soc_reg; + struct { + __le32 len; + __le32 version; + __le32 type; + } get_target_info; + struct { + __le32 patch_id; + } rompatch_install; + struct { + __le32 patch_id; + } rompatch_uninstall; + struct { + /* 0 = nothing executed + * otherwise = NVRAM segment return value */ + __le32 result; + } nvram_process; + u8 payload[BMI_MAX_CMDBUF_SIZE]; +} __packed; + +struct bmi_target_info { + u32 version; + u32 type; +}; + + +/* in jiffies */ +#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ) + +#define BMI_CE_NUM_TO_TARG 0 +#define BMI_CE_NUM_TO_HOST 1 + +int ath10k_bmi_done(struct ath10k *ar); +int ath10k_bmi_get_target_info(struct ath10k *ar, + struct bmi_target_info *target_info); +int ath10k_bmi_read_memory(struct ath10k *ar, u32 address, + void *buffer, u32 length); +int ath10k_bmi_write_memory(struct ath10k *ar, u32 address, + const void *buffer, u32 length); + +#define ath10k_bmi_read32(ar, item, val) \ + ({ \ + int ret; \ + u32 addr; \ + __le32 tmp; \ + \ + addr = host_interest_item_address(HI_ITEM(item)); \ + ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \ + *val = __le32_to_cpu(tmp); \ + ret; \ + }) + +#define ath10k_bmi_write32(ar, item, val) \ + ({ \ + int ret; \ + u32 address; \ + __le32 v = __cpu_to_le32(val); \ + \ + address = host_interest_item_address(HI_ITEM(item)); \ + ret = ath10k_bmi_write_memory(ar, address, \ + (u8 *)&v, sizeof(v)); \ + ret; \ + }) + +int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param); +int
ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address); +int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length); +int ath10k_bmi_fast_download(struct ath10k *ar, u32 address, + const void *buffer, u32 length); +#endif /* _BMI_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c new file mode 100644 index 000000000000..61a8ac70d3ca --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -0,0 +1,1189 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif.h" +#include "pci.h" +#include "ce.h" +#include "debug.h" + +/* + * Support for Copy Engine hardware, which is mainly used for + * communication between Host and Target over a PCIe interconnect. + */ + +/* + * A single CopyEngine (CE) comprises two "rings": + * a source ring + * a destination ring + * + * Each ring consists of a number of descriptors which specify + * an address, length, and meta-data. + * + * Typically, one side of the PCIe interconnect (Host or Target) + * controls one ring and the other side controls the other ring. + * The source side chooses when to initiate a transfer and it + * chooses what to send (buffer address, length). The destination + * side keeps a supply of "anonymous receive buffers" available and + * it handles incoming data as it arrives (when the destination + * receives an interrupt). + * + * The sender may send a simple buffer (address/length) or it may + * send a small list of buffers. When a small list is sent, hardware + * "gathers" these and they end up in a single destination buffer + * with a single interrupt. + * + * There are several "contexts" managed by this layer -- more, it + * may seem -- than should be needed. These are provided mainly for + * maximum flexibility and especially to facilitate a simpler HIF + * implementation. There are per-CopyEngine recv, send, and watermark + * contexts. These are supplied by the caller when a recv, send, + * or watermark handler is established and they are echoed back to + * the caller when the respective callbacks are invoked. There is + * also a per-transfer context supplied by the caller when a buffer + * (or sendlist) is sent and when a buffer is enqueued for recv. + * These per-transfer contexts are echoed back to the caller when + * the buffer is sent/received.
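+ * + * E.g. a caller may hand an skb pointer to ath10k_ce_send() as the + * per-transfer context; the same pointer is echoed back to its + * registered send callback once that buffer's completion is processed.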
+ */ + +static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n); +} + +static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar, + u32 ce_ctrl_addr) +{ + return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS); +} + +static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + void __iomem *indicator_addr; + + if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) { + ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); + return; + } + + /* workaround for QCA988x_1.0 HW CE */ + indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS; + + if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) { + iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr); + } else { + unsigned long irq_flags; + local_irq_save(irq_flags); + iowrite32(1, indicator_addr); + + /* + * PCIE write waits for ACK in IPQ8K, there is no + * need to read back value. + */ + (void)ioread32(indicator_addr); + (void)ioread32(indicator_addr); /* conservative */ + + ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); + + iowrite32(0, indicator_addr); + local_irq_restore(irq_flags); + } +} + +static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, + u32 ce_ctrl_addr) +{ + return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS); +} + +static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, + u32 ce_ctrl_addr) +{ + return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS); +} + +static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int addr) +{ + ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr); +} + +static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n); +} + +static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + u32 ctrl1_addr = ath10k_pci_read32((ar), + (ce_ctrl_addr) + CE_CTRL1_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, + (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) | + CE_CTRL1_DMAX_LENGTH_SET(n)); +} + +static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, + (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) | + CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n)); +} + +static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, + (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) | + CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n)); +} + +static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, + u32 ce_ctrl_addr) +{ + return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS); +} + +static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, + u32 ce_ctrl_addr, + u32 addr) +{ + ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr); +} + +static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) 
+{ + ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n); +} + +static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, + (addr & ~SRC_WATERMARK_HIGH_MASK) | + SRC_WATERMARK_HIGH_SET(n)); +} + +static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, + (addr & ~SRC_WATERMARK_LOW_MASK) | + SRC_WATERMARK_LOW_SET(n)); +} + +static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, + (addr & ~DST_WATERMARK_HIGH_MASK) | + DST_WATERMARK_HIGH_SET(n)); +} + +static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int n) +{ + u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, + (addr & ~DST_WATERMARK_LOW_MASK) | + DST_WATERMARK_LOW_SET(n)); +} + +static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, + u32 ce_ctrl_addr) +{ + u32 host_ie_addr = ath10k_pci_read32(ar, + ce_ctrl_addr + HOST_IE_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, + host_ie_addr | HOST_IE_COPY_COMPLETE_MASK); +} + +static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, + u32 ce_ctrl_addr) +{ + u32 host_ie_addr = ath10k_pci_read32(ar, + ce_ctrl_addr + HOST_IE_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, + host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK); +} + +static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, + u32 ce_ctrl_addr) +{ + u32 host_ie_addr = ath10k_pci_read32(ar, + ce_ctrl_addr + HOST_IE_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, + host_ie_addr & ~CE_WATERMARK_MASK); +} + +static inline void ath10k_ce_error_intr_enable(struct ath10k *ar, + u32 ce_ctrl_addr) +{ + u32 misc_ie_addr = ath10k_pci_read32(ar, + ce_ctrl_addr + MISC_IE_ADDRESS); + + ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, + misc_ie_addr | CE_ERROR_MASK); +} + +static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, + u32 ce_ctrl_addr, + unsigned int mask) +{ + ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask); +} + + +/* + * Guts of ath10k_ce_send, used by both ath10k_ce_send and + * ath10k_ce_sendlist_send. + * The caller takes responsibility for any needed locking. 
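+ * + * A minimal caller sketch (this is what ath10k_ce_send() below does): + * + * spin_lock_bh(&ar_pci->ce_lock); + * ret = ath10k_ce_send_nolock(ce_state, ctx, buf, nbytes, id, flags); + * spin_unlock_bh(&ar_pci->ce_lock);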
+ */ +static int ath10k_ce_send_nolock(struct ce_state *ce_state, + void *per_transfer_context, + u32 buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags) +{ + struct ath10k *ar = ce_state->ar; + struct ce_ring_state *src_ring = ce_state->src_ring; + struct ce_desc *desc, *sdesc; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index = src_ring->sw_index; + unsigned int write_index = src_ring->write_index; + u32 ctrl_addr = ce_state->ctrl_addr; + u32 desc_flags = 0; + int ret = 0; + + if (nbytes > ce_state->src_sz_max) + ath10k_warn("%s: sending more than we can (nbytes: %d, max: %d)\n", + __func__, nbytes, ce_state->src_sz_max); + + ath10k_pci_wake(ar); + + if (unlikely(CE_RING_DELTA(nentries_mask, + write_index, sw_index - 1) <= 0)) { + ret = -EIO; + goto exit; + } + + desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space, + write_index); + sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index); + + desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA); + + if (flags & CE_SEND_FLAG_GATHER) + desc_flags |= CE_DESC_FLAGS_GATHER; + if (flags & CE_SEND_FLAG_BYTE_SWAP) + desc_flags |= CE_DESC_FLAGS_BYTE_SWAP; + + sdesc->addr = __cpu_to_le32(buffer); + sdesc->nbytes = __cpu_to_le16(nbytes); + sdesc->flags = __cpu_to_le16(desc_flags); + + *desc = *sdesc; + + src_ring->per_transfer_context[write_index] = per_transfer_context; + + /* Update Source Ring Write Index */ + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + + /* WORKAROUND */ + if (!(flags & CE_SEND_FLAG_GATHER)) + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); + + src_ring->write_index = write_index; +exit: + ath10k_pci_sleep(ar); + return ret; +} + +int ath10k_ce_send(struct ce_state *ce_state, + void *per_transfer_context, + u32 buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + spin_lock_bh(&ar_pci->ce_lock); + ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, + buffer, nbytes, transfer_id, flags); + spin_unlock_bh(&ar_pci->ce_lock); + + return ret; +} + +void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer, + unsigned int nbytes, u32 flags) +{ + unsigned int num_items = sendlist->num_items; + struct ce_sendlist_item *item; + + item = &sendlist->item[num_items]; + item->data = buffer; + item->u.nbytes = nbytes; + item->flags = flags; + sendlist->num_items++; +} + +int ath10k_ce_sendlist_send(struct ce_state *ce_state, + void *per_transfer_context, + struct ce_sendlist *sendlist, + unsigned int transfer_id) +{ + struct ce_ring_state *src_ring = ce_state->src_ring; + struct ce_sendlist_item *item; + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int num_items = sendlist->num_items; + unsigned int sw_index; + unsigned int write_index; + int i, delta, ret = -ENOMEM; + + spin_lock_bh(&ar_pci->ce_lock); + + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); + + if (delta >= num_items) { + /* + * Handle all but the last item uniformly.
+ */ + for (i = 0; i < num_items - 1; i++) { + item = &sendlist->item[i]; + ret = ath10k_ce_send_nolock(ce_state, + CE_SENDLIST_ITEM_CTXT, + (u32) item->data, + item->u.nbytes, transfer_id, + item->flags | + CE_SEND_FLAG_GATHER); + if (ret) + ath10k_warn("CE send failed for item: %d\n", i); + } + /* + * Provide valid context pointer for final item. + */ + item = &sendlist->item[i]; + ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, + (u32) item->data, item->u.nbytes, + transfer_id, item->flags); + if (ret) + ath10k_warn("CE send failed for last item: %d\n", i); + } + + spin_unlock_bh(&ar_pci->ce_lock); + + return ret; +} + +int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state, + void *per_recv_context, + u32 buffer) +{ + struct ce_ring_state *dest_ring = ce_state->dest_ring; + u32 ctrl_addr = ce_state->ctrl_addr; + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index; + unsigned int sw_index; + int ret; + + spin_lock_bh(&ar_pci->ce_lock); + write_index = dest_ring->write_index; + sw_index = dest_ring->sw_index; + + ath10k_pci_wake(ar); + + if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) { + struct ce_desc *base = dest_ring->base_addr_owner_space; + struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); + + /* Update destination descriptor */ + desc->addr = __cpu_to_le32(buffer); + desc->nbytes = 0; + + dest_ring->per_transfer_context[write_index] = + per_recv_context; + + /* Update Destination Ring Write Index */ + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); + dest_ring->write_index = write_index; + ret = 0; + } else { + ret = -EIO; + } + ath10k_pci_sleep(ar); + spin_unlock_bh(&ar_pci->ce_lock); + + return ret; +} + +/* + * Guts of ath10k_ce_completed_recv_next. + * The caller takes responsibility for any necessary locking. + */ +static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp) +{ + struct ce_ring_state *dest_ring = ce_state->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int sw_index = dest_ring->sw_index; + + struct ce_desc *base = dest_ring->base_addr_owner_space; + struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index); + struct ce_desc sdesc; + u16 nbytes; + + /* Copy in one go for performance reasons */ + sdesc = *desc; + + nbytes = __le16_to_cpu(sdesc.nbytes); + if (nbytes == 0) { + /* + * This closes a relatively unusual race where the Host + * sees the updated DRRI before the update to the + * corresponding descriptor has completed. We treat this + * as a descriptor that is not yet done. 
+ */ + return -EIO; + } + + desc->nbytes = 0; + + /* Return data from completed destination descriptor */ + *bufferp = __le32_to_cpu(sdesc.addr); + *nbytesp = nbytes; + *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA); + + if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP) + *flagsp = CE_RECV_FLAG_SWAPPED; + else + *flagsp = 0; + + if (per_transfer_contextp) + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + + /* sanity */ + dest_ring->per_transfer_context[sw_index] = NULL; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + + return 0; +} + +int ath10k_ce_completed_recv_next(struct ce_state *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + spin_lock_bh(&ar_pci->ce_lock); + ret = ath10k_ce_completed_recv_next_nolock(ce_state, + per_transfer_contextp, + bufferp, nbytesp, + transfer_idp, flagsp); + spin_unlock_bh(&ar_pci->ce_lock); + + return ret; +} + +int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, + void **per_transfer_contextp, + u32 *bufferp) +{ + struct ce_ring_state *dest_ring; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; + int ret; + struct ath10k *ar; + struct ath10k_pci *ar_pci; + + dest_ring = ce_state->dest_ring; + + if (!dest_ring) + return -EIO; + + ar = ce_state->ar; + ar_pci = ath10k_pci_priv(ar); + + spin_lock_bh(&ar_pci->ce_lock); + + nentries_mask = dest_ring->nentries_mask; + sw_index = dest_ring->sw_index; + write_index = dest_ring->write_index; + if (write_index != sw_index) { + struct ce_desc *base = dest_ring->base_addr_owner_space; + struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index); + + /* Return data from completed destination descriptor */ + *bufferp = __le32_to_cpu(desc->addr); + + if (per_transfer_contextp) + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + + /* sanity */ + dest_ring->per_transfer_context[sw_index] = NULL; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + ret = 0; + } else { + ret = -EIO; + } + + spin_unlock_bh(&ar_pci->ce_lock); + + return ret; +} + +/* + * Guts of ath10k_ce_completed_send_next. + * The caller takes responsibility for any necessary locking. + */ +static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp) +{ + struct ce_ring_state *src_ring = ce_state->src_ring; + u32 ctrl_addr = ce_state->ctrl_addr; + struct ath10k *ar = ce_state->ar; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index = src_ring->sw_index; + unsigned int read_index; + int ret = -EIO; + + if (src_ring->hw_index == sw_index) { + /* + * The SW completion index has caught up with the cached + * version of the HW completion index. + * Update the cached HW completion index to see whether + * the SW has really caught up to the HW, or if the cached + * value of the HW index has become stale. 
+ */ + ath10k_pci_wake(ar); + src_ring->hw_index = + ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + ath10k_pci_sleep(ar); + } + read_index = src_ring->hw_index; + + if ((read_index != sw_index) && (read_index != 0xffffffff)) { + struct ce_desc *sbase = src_ring->shadow_base; + struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index); + + /* Return data from completed source descriptor */ + *bufferp = __le32_to_cpu(sdesc->addr); + *nbytesp = __le16_to_cpu(sdesc->nbytes); + *transfer_idp = MS(__le16_to_cpu(sdesc->flags), + CE_DESC_FLAGS_META_DATA); + + if (per_transfer_contextp) + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + + /* sanity */ + src_ring->per_transfer_context[sw_index] = NULL; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + ret = 0; + } + + return ret; +} + +/* NB: Modeled after ath10k_ce_completed_send_next */ +int ath10k_ce_cancel_send_next(struct ce_state *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp) +{ + struct ce_ring_state *src_ring; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; + int ret; + struct ath10k *ar; + struct ath10k_pci *ar_pci; + + src_ring = ce_state->src_ring; + + if (!src_ring) + return -EIO; + + ar = ce_state->ar; + ar_pci = ath10k_pci_priv(ar); + + spin_lock_bh(&ar_pci->ce_lock); + + nentries_mask = src_ring->nentries_mask; + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + if (write_index != sw_index) { + struct ce_desc *base = src_ring->base_addr_owner_space; + struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index); + + /* Return data from completed source descriptor */ + *bufferp = __le32_to_cpu(desc->addr); + *nbytesp = __le16_to_cpu(desc->nbytes); + *transfer_idp = MS(__le16_to_cpu(desc->flags), + CE_DESC_FLAGS_META_DATA); + + if (per_transfer_contextp) + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + + /* sanity */ + src_ring->per_transfer_context[sw_index] = NULL; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + ret = 0; + } else { + ret = -EIO; + } + + spin_unlock_bh(&ar_pci->ce_lock); + + return ret; +} + +int ath10k_ce_completed_send_next(struct ce_state *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + spin_lock_bh(&ar_pci->ce_lock); + ret = ath10k_ce_completed_send_next_nolock(ce_state, + per_transfer_contextp, + bufferp, nbytesp, + transfer_idp); + spin_unlock_bh(&ar_pci->ce_lock); + + return ret; +} + +/* + * Guts of interrupt handler for per-engine interrupts on a particular CE. + * + * Invokes registered callbacks for recv_complete, + * send_complete, and watermarks. + */ +void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id]; + u32 ctrl_addr = ce_state->ctrl_addr; + void *transfer_context; + u32 buf; + unsigned int nbytes; + unsigned int id; + unsigned int flags; + + ath10k_pci_wake(ar); + spin_lock_bh(&ar_pci->ce_lock); + + /* Clear the copy-complete interrupts that will be handled here. 
*/ + ath10k_ce_engine_int_status_clear(ar, ctrl_addr, + HOST_IS_COPY_COMPLETE_MASK); + + if (ce_state->recv_cb) { + /* + * Pop completed recv buffers and call the registered + * recv callback for each + */ + while (ath10k_ce_completed_recv_next_nolock(ce_state, + &transfer_context, + &buf, &nbytes, + &id, &flags) == 0) { + spin_unlock_bh(&ar_pci->ce_lock); + ce_state->recv_cb(ce_state, transfer_context, buf, + nbytes, id, flags); + spin_lock_bh(&ar_pci->ce_lock); + } + } + + if (ce_state->send_cb) { + /* + * Pop completed send buffers and call the registered + * send callback for each + */ + while (ath10k_ce_completed_send_next_nolock(ce_state, + &transfer_context, + &buf, + &nbytes, + &id) == 0) { + spin_unlock_bh(&ar_pci->ce_lock); + ce_state->send_cb(ce_state, transfer_context, + buf, nbytes, id); + spin_lock_bh(&ar_pci->ce_lock); + } + } + + /* + * Misc CE interrupts are not being handled, but still need + * to be cleared. + */ + ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK); + + spin_unlock_bh(&ar_pci->ce_lock); + ath10k_pci_sleep(ar); +} + +/* + * Handler for per-engine interrupts on ALL active CEs. + * This is used in cases where the system is sharing a + * single interrupt for all CEs. + */ + +void ath10k_ce_per_engine_service_any(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ce_id; + u32 intr_summary; + + ath10k_pci_wake(ar); + intr_summary = CE_INTERRUPT_SUMMARY(ar); + + for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) { + if (intr_summary & (1 << ce_id)) + intr_summary &= ~(1 << ce_id); + else + /* no intr pending on this CE */ + continue; + + ath10k_ce_per_engine_service(ar, ce_id); + } + + ath10k_pci_sleep(ar); +} + +/* + * Adjust interrupts for the copy complete handler. + * If it's needed for either send or recv, then unmask + * this interrupt; otherwise, mask it. + * + * Called with ce_lock held.
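+ * + * E.g. a send callback registered with disable_interrupts set keeps + * the copy-complete interrupt masked; its completions are then only + * reaped when ath10k_ce_per_engine_service() is called explicitly.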
+ */ +static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state, + int disable_copy_compl_intr) +{ + u32 ctrl_addr = ce_state->ctrl_addr; + struct ath10k *ar = ce_state->ar; + + ath10k_pci_wake(ar); + + if ((!disable_copy_compl_intr) && + (ce_state->send_cb || ce_state->recv_cb)) + ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr); + else + ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); + + ath10k_ce_watermark_intr_disable(ar, ctrl_addr); + + ath10k_pci_sleep(ar); +} + +void ath10k_ce_disable_interrupts(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ce_id; + + ath10k_pci_wake(ar); + for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) { + struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id]; + u32 ctrl_addr = ce_state->ctrl_addr; + + ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); + } + ath10k_pci_sleep(ar); +} + +void ath10k_ce_send_cb_register(struct ce_state *ce_state, + void (*send_cb) (struct ce_state *ce_state, + void *transfer_context, + u32 buffer, + unsigned int nbytes, + unsigned int transfer_id), + int disable_interrupts) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + spin_lock_bh(&ar_pci->ce_lock); + ce_state->send_cb = send_cb; + ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts); + spin_unlock_bh(&ar_pci->ce_lock); +} + +void ath10k_ce_recv_cb_register(struct ce_state *ce_state, + void (*recv_cb) (struct ce_state *ce_state, + void *transfer_context, + u32 buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags)) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + spin_lock_bh(&ar_pci->ce_lock); + ce_state->recv_cb = recv_cb; + ath10k_ce_per_engine_handler_adjust(ce_state, 0); + spin_unlock_bh(&ar_pci->ce_lock); +} + +static int ath10k_ce_init_src_ring(struct ath10k *ar, + unsigned int ce_id, + struct ce_state *ce_state, + const struct ce_attr *attr) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ce_ring_state *src_ring; + unsigned int nentries = attr->src_nentries; + unsigned int ce_nbytes; + u32 ctrl_addr = ath10k_ce_base_address(ce_id); + dma_addr_t base_addr; + char *ptr; + + nentries = roundup_pow_of_two(nentries); + + if (ce_state->src_ring) { + WARN_ON(ce_state->src_ring->nentries != nentries); + return 0; + } + + ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *)); + ptr = kzalloc(ce_nbytes, GFP_KERNEL); + if (ptr == NULL) + return -ENOMEM; + + ce_state->src_ring = (struct ce_ring_state *)ptr; + src_ring = ce_state->src_ring; + + ptr += sizeof(struct ce_ring_state); + src_ring->nentries = nentries; + src_ring->nentries_mask = nentries - 1; + + ath10k_pci_wake(ar); + src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + src_ring->hw_index = src_ring->sw_index; + + src_ring->write_index = + ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); + ath10k_pci_sleep(ar); + + src_ring->per_transfer_context = (void **)ptr; + + /* + * Legacy platforms that do not support cache + * coherent DMA are unsupported + */ + src_ring->base_addr_owner_space_unaligned = + pci_alloc_consistent(ar_pci->pdev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + &base_addr); + src_ring->base_addr_ce_space_unaligned = base_addr; + + src_ring->base_addr_owner_space = PTR_ALIGN( + src_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + src_ring->base_addr_ce_space = ALIGN( + src_ring->base_addr_ce_space_unaligned, + 
CE_DESC_RING_ALIGN); + + /* + * Also allocate a shadow src ring in regular + * mem to use for faster access. + */ + src_ring->shadow_base_unaligned = + kmalloc((nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), GFP_KERNEL); + + src_ring->shadow_base = PTR_ALIGN( + src_ring->shadow_base_unaligned, + CE_DESC_RING_ALIGN); + + ath10k_pci_wake(ar); + ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, + src_ring->base_addr_ce_space); + ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); + ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); + ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0); + ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0); + ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); + ath10k_pci_sleep(ar); + + return 0; +} + +static int ath10k_ce_init_dest_ring(struct ath10k *ar, + unsigned int ce_id, + struct ce_state *ce_state, + const struct ce_attr *attr) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ce_ring_state *dest_ring; + unsigned int nentries = attr->dest_nentries; + unsigned int ce_nbytes; + u32 ctrl_addr = ath10k_ce_base_address(ce_id); + dma_addr_t base_addr; + char *ptr; + + nentries = roundup_pow_of_two(nentries); + + if (ce_state->dest_ring) { + WARN_ON(ce_state->dest_ring->nentries != nentries); + return 0; + } + + ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *)); + ptr = kzalloc(ce_nbytes, GFP_KERNEL); + if (ptr == NULL) + return -ENOMEM; + + ce_state->dest_ring = (struct ce_ring_state *)ptr; + dest_ring = ce_state->dest_ring; + + ptr += sizeof(struct ce_ring_state); + dest_ring->nentries = nentries; + dest_ring->nentries_mask = nentries - 1; + + ath10k_pci_wake(ar); + dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); + dest_ring->write_index = + ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); + ath10k_pci_sleep(ar); + + dest_ring->per_transfer_context = (void **)ptr; + + /* + * Legacy platforms that do not support cache + * coherent DMA are unsupported + */ + dest_ring->base_addr_owner_space_unaligned = + pci_alloc_consistent(ar_pci->pdev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + &base_addr); + dest_ring->base_addr_ce_space_unaligned = base_addr; + + /* + * Initialize the memory to 0 to prevent garbage data from + * crashing the system when firmware is downloaded + */ + memset(dest_ring->base_addr_owner_space_unaligned, 0, + nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN); + + dest_ring->base_addr_owner_space = PTR_ALIGN( + dest_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + dest_ring->base_addr_ce_space = ALIGN( + dest_ring->base_addr_ce_space_unaligned, + CE_DESC_RING_ALIGN); + + ath10k_pci_wake(ar); + ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, + dest_ring->base_addr_ce_space); + ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries); + ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); + ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0); + ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); + ath10k_pci_sleep(ar); + + return 0; +} + +static struct ce_state *ath10k_ce_init_state(struct ath10k *ar, + unsigned int ce_id, + const struct ce_attr *attr) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ce_state *ce_state = NULL; + u32 ctrl_addr = ath10k_ce_base_address(ce_id); + + spin_lock_bh(&ar_pci->ce_lock); + + if (!ar_pci->ce_id_to_state[ce_id]) { + ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC); + if (ce_state == NULL) { + spin_unlock_bh(&ar_pci->ce_lock); + return NULL; + } + +
ar_pci->ce_id_to_state[ce_id] = ce_state; + ce_state->ar = ar; + ce_state->id = ce_id; + ce_state->ctrl_addr = ctrl_addr; + ce_state->state = CE_RUNNING; + /* Save attribute flags */ + ce_state->attr_flags = attr->flags; + ce_state->src_sz_max = attr->src_sz_max; + } + + spin_unlock_bh(&ar_pci->ce_lock); + + return ce_state; +} + +/* + * Initialize a Copy Engine based on caller-supplied attributes. + * This may be called once to initialize both source and destination + * rings or it may be called twice for separate source and destination + * initialization. It may be that only one side or the other is + * initialized by software/firmware. + */ +struct ce_state *ath10k_ce_init(struct ath10k *ar, + unsigned int ce_id, + const struct ce_attr *attr) +{ + struct ce_state *ce_state; + u32 ctrl_addr = ath10k_ce_base_address(ce_id); + + ce_state = ath10k_ce_init_state(ar, ce_id, attr); + if (!ce_state) { + ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id); + return NULL; + } + + if (attr->src_nentries) { + if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) { + ath10k_err("Failed to initialize CE src ring for ID: %d\n", + ce_id); + ath10k_ce_deinit(ce_state); + return NULL; + } + } + + if (attr->dest_nentries) { + if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) { + ath10k_err("Failed to initialize CE dest ring for ID: %d\n", + ce_id); + ath10k_ce_deinit(ce_state); + return NULL; + } + } + + /* Enable CE error interrupts */ + ath10k_pci_wake(ar); + ath10k_ce_error_intr_enable(ar, ctrl_addr); + ath10k_pci_sleep(ar); + + return ce_state; +} + +void ath10k_ce_deinit(struct ce_state *ce_state) +{ + unsigned int ce_id = ce_state->id; + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + ce_state->state = CE_UNUSED; + ar_pci->ce_id_to_state[ce_id] = NULL; + + if (ce_state->src_ring) { + kfree(ce_state->src_ring->shadow_base_unaligned); + pci_free_consistent(ar_pci->pdev, + (ce_state->src_ring->nentries * + sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + ce_state->src_ring->base_addr_owner_space, + ce_state->src_ring->base_addr_ce_space); + kfree(ce_state->src_ring); + } + + if (ce_state->dest_ring) { + pci_free_consistent(ar_pci->pdev, + (ce_state->dest_ring->nentries * + sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + ce_state->dest_ring->base_addr_owner_space, + ce_state->dest_ring->base_addr_ce_space); + kfree(ce_state->dest_ring); + } + kfree(ce_state); +} diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h new file mode 100644 index 000000000000..c17f07c026f4 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -0,0 +1,516 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _CE_H_ +#define _CE_H_ + +#include "hif.h" + + +/* Maximum number of Copy Engines supported */ +#define CE_COUNT_MAX 8 +#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048 + +/* Descriptor rings must be aligned to this boundary */ +#define CE_DESC_RING_ALIGN 8 +#define CE_SENDLIST_ITEMS_MAX 12 +#define CE_SEND_FLAG_GATHER 0x00010000 + +/* + * Copy Engine support: low-level Target-side Copy Engine API. + * This is a hardware access layer used by code that understands + * how to use copy engines. + */ + +struct ce_state; + + +/* Copy Engine operational state */ +enum ce_op_state { + CE_UNUSED, + CE_PAUSED, + CE_RUNNING, +}; + +#define CE_DESC_FLAGS_GATHER (1 << 0) +#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) +#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC +#define CE_DESC_FLAGS_META_DATA_LSB 3 + +struct ce_desc { + __le32 addr; + __le16 nbytes; + __le16 flags; /* %CE_DESC_FLAGS_ */ +}; + +/* Copy Engine Ring internal state */ +struct ce_ring_state { + /* Number of entries in this ring; must be power of 2 */ + unsigned int nentries; + unsigned int nentries_mask; + + /* + * For dest ring, this is the next index to be processed + * by software after it was/is received into. + * + * For src ring, this is the last descriptor that was sent + * and completion processed by software. + * + * Regardless of src or dest ring, this is an invariant + * (modulo ring size): + * write index >= read index >= sw_index + */ + unsigned int sw_index; + /* cached copy */ + unsigned int write_index; + /* + * For src ring, this is the next index not yet processed by HW. + * This is a cached copy of the real HW index (read index), used + * for avoiding reading the HW index register more often than + * necessary. + * This extends the invariant: + * write index >= read index >= hw_index >= sw_index + * + * For dest ring, this is currently unused. + */ + /* cached copy */ + unsigned int hw_index; + + /* Start of DMA-coherent area reserved for descriptors */ + /* Host address space */ + void *base_addr_owner_space_unaligned; + /* CE address space */ + u32 base_addr_ce_space_unaligned; + + /* + * Actual start of descriptors. + * Aligned to descriptor-size boundary. + * Points into reserved DMA-coherent area, above. + */ + /* Host address space */ + void *base_addr_owner_space; + + /* CE address space */ + u32 base_addr_ce_space; + /* + * Start of shadow copy of descriptors, within regular memory. + * Aligned to descriptor-size boundary. + */ + void *shadow_base_unaligned; + struct ce_desc *shadow_base; + + void **per_transfer_context; +}; + +/* Copy Engine internal state */ +struct ce_state { + struct ath10k *ar; + unsigned int id; + + unsigned int attr_flags; + + u32 ctrl_addr; + enum ce_op_state state; + + void (*send_cb) (struct ce_state *ce_state, + void *per_transfer_send_context, + u32 buffer, + unsigned int nbytes, + unsigned int transfer_id); + void (*recv_cb) (struct ce_state *ce_state, + void *per_transfer_recv_context, + u32 buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags); + + unsigned int src_sz_max; + struct ce_ring_state *src_ring; + struct ce_ring_state *dest_ring; +}; + +struct ce_sendlist_item { + /* e.g.
buffer or desc list */ + dma_addr_t data; + union { + /* simple buffer */ + unsigned int nbytes; + /* Rx descriptor list */ + unsigned int ndesc; + } u; + /* externally-specified flags; OR-ed with internal flags */ + u32 flags; +}; + +struct ce_sendlist { + unsigned int num_items; + struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX]; +}; + +/* Copy Engine settable attributes */ +struct ce_attr; + +/*==================Send====================*/ + +/* ath10k_ce_send flags */ +#define CE_SEND_FLAG_BYTE_SWAP 1 + +/* + * Queue a source buffer to be sent to an anonymous destination buffer. + * ce - which copy engine to use + * buffer - address of buffer + * nbytes - number of bytes to send + * transfer_id - arbitrary ID; reflected to destination + * flags - CE_SEND_FLAG_* values + * Returns 0 on success; otherwise an error status. + * + * Note: If no flags are specified, use CE's default data swap mode. + * + * Implementation note: pushes 1 buffer to Source ring + */ +int ath10k_ce_send(struct ce_state *ce_state, + void *per_transfer_send_context, + u32 buffer, + unsigned int nbytes, + /* 14 bits */ + unsigned int transfer_id, + unsigned int flags); + +void ath10k_ce_send_cb_register(struct ce_state *ce_state, + void (*send_cb) (struct ce_state *ce_state, + void *transfer_context, + u32 buffer, + unsigned int nbytes, + unsigned int transfer_id), + int disable_interrupts); + +/* Append a simple buffer (address/length) to a sendlist. */ +void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, + u32 buffer, + unsigned int nbytes, + /* OR-ed with internal flags */ + u32 flags); + +/* + * Queue a "sendlist" of buffers to be sent using gather to a single + * anonymous destination buffer + * ce - which copy engine to use + * sendlist - list of simple buffers to send using gather + * transfer_id - arbitrary ID; reflected to destination + * Returns 0 on success; otherwise an error status. + * + * Implementation note: Pushes multiple buffers with Gather to Source ring. + */ +int ath10k_ce_sendlist_send(struct ce_state *ce_state, + void *per_transfer_send_context, + struct ce_sendlist *sendlist, + /* 14 bits */ + unsigned int transfer_id); + +/*==================Recv=======================*/ + +/* + * Make a buffer available to receive. The buffer must be at least of a + * minimal size appropriate for this copy engine (src_sz_max attribute). + * ce - which copy engine to use + * per_transfer_recv_context - context passed back to caller's recv_cb + * buffer - address of buffer in CE space + * Returns 0 on success; otherwise an error status. + * + * Implementation note: Pushes a buffer to Dest ring. + */ +int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state, + void *per_transfer_recv_context, + u32 buffer); + +void ath10k_ce_recv_cb_register(struct ce_state *ce_state, + void (*recv_cb) (struct ce_state *ce_state, + void *transfer_context, + u32 buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags)); + +/* recv flags */ +/* Data is byte-swapped */ +#define CE_RECV_FLAG_SWAPPED 1 + +/* + * Supply data for the next completed unprocessed receive descriptor. + * Pops buffer from Dest ring. + */ +int ath10k_ce_completed_recv_next(struct ce_state *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp); +/* + * Supply data for the next completed unprocessed send descriptor. + * Pops 1 completed send buffer from Source ring.
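+ * + * Typical usage (illustrative; reclaim_buffer() is a hypothetical + * helper of the caller): + * + * while (ath10k_ce_completed_send_next(ce_state, &ctx, &buf, + * &nbytes, &id) == 0) + * reclaim_buffer(ctx);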
+ */ +int ath10k_ce_completed_send_next(struct ce_state *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp); + +/*==================CE Engine Initialization=======================*/ + +/* Initialize an instance of a CE */ +struct ce_state *ath10k_ce_init(struct ath10k *ar, + unsigned int ce_id, + const struct ce_attr *attr); + +/*==================CE Engine Shutdown=======================*/ +/* + * Support clean shutdown by allowing the caller to revoke + * receive buffers. Target DMA must be stopped before using + * this API. + */ +int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, + void **per_transfer_contextp, + u32 *bufferp); + +/* + * Support clean shutdown by allowing the caller to cancel + * pending sends. Target DMA must be stopped before using + * this API. + */ +int ath10k_ce_cancel_send_next(struct ce_state *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp); + +void ath10k_ce_deinit(struct ce_state *ce_state); + +/*==================CE Interrupt Handlers====================*/ +void ath10k_ce_per_engine_service_any(struct ath10k *ar); +void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); +void ath10k_ce_disable_interrupts(struct ath10k *ar); + +/* ce_attr.flags values */ +/* Use NonSnooping PCIe accesses? */ +#define CE_ATTR_NO_SNOOP 1 + +/* Byte swap data words */ +#define CE_ATTR_BYTE_SWAP_DATA 2 + +/* Swizzle descriptors? */ +#define CE_ATTR_SWIZZLE_DESCRIPTORS 4 + +/* no interrupt on copy completion */ +#define CE_ATTR_DIS_INTR 8 + +/* Attributes of an instance of a Copy Engine */ +struct ce_attr { + /* CE_ATTR_* values */ + unsigned int flags; + + /* currently not in use */ + unsigned int priority; + + /* #entries in source ring - Must be a power of 2 */ + unsigned int src_nentries; + + /* + * Max source send size for this CE. + * This is also the minimum size of a destination buffer. + */ + unsigned int src_sz_max; + + /* #entries in destination ring - Must be a power of 2 */ + unsigned int dest_nentries; + + /* Future use */ + void *reserved; +}; + +/* + * When using sendlist_send to transfer multiple buffer fragments, the + * transfer context of each fragment, except last one, will be filled + * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for + * each fragment done with send and the transfer context would be + * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the + * status of a send completion. 
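+ * + * E.g. in a send completion handler (illustrative): + * + * if (transfer_context == CE_SENDLIST_ITEM_CTXT) + * return; /* interior fragment; wait for the final one */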
+ */ +#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef) + +#define SR_BA_ADDRESS 0x0000 +#define SR_SIZE_ADDRESS 0x0004 +#define DR_BA_ADDRESS 0x0008 +#define DR_SIZE_ADDRESS 0x000c +#define CE_CMD_ADDRESS 0x0018 + +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17 +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17 +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000 +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \ + (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \ + CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) + +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16 +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16 +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000 +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \ + (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \ + CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \ + (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \ + CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) + +#define CE_CTRL1_DMAX_LENGTH_MSB 15 +#define CE_CTRL1_DMAX_LENGTH_LSB 0 +#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff +#define CE_CTRL1_DMAX_LENGTH_GET(x) \ + (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB) +#define CE_CTRL1_DMAX_LENGTH_SET(x) \ + (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK) + +#define CE_CTRL1_ADDRESS 0x0010 +#define CE_CTRL1_HW_MASK 0x0007ffff +#define CE_CTRL1_SW_MASK 0x0007ffff +#define CE_CTRL1_HW_WRITE_MASK 0x00000000 +#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff +#define CE_CTRL1_RSTMASK 0xffffffff +#define CE_CTRL1_RESET 0x00000080 + +#define CE_CMD_HALT_STATUS_MSB 3 +#define CE_CMD_HALT_STATUS_LSB 3 +#define CE_CMD_HALT_STATUS_MASK 0x00000008 +#define CE_CMD_HALT_STATUS_GET(x) \ + (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB) +#define CE_CMD_HALT_STATUS_SET(x) \ + (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK) +#define CE_CMD_HALT_STATUS_RESET 0 +#define CE_CMD_HALT_MSB 0 +#define CE_CMD_HALT_MASK 0x00000001 + +#define HOST_IE_COPY_COMPLETE_MSB 0 +#define HOST_IE_COPY_COMPLETE_LSB 0 +#define HOST_IE_COPY_COMPLETE_MASK 0x00000001 +#define HOST_IE_COPY_COMPLETE_GET(x) \ + (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB) +#define HOST_IE_COPY_COMPLETE_SET(x) \ + (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK) +#define HOST_IE_COPY_COMPLETE_RESET 0 +#define HOST_IE_ADDRESS 0x002c + +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010 +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008 +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004 +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 +#define HOST_IS_COPY_COMPLETE_MASK 0x00000001 +#define HOST_IS_ADDRESS 0x0030 + +#define MISC_IE_ADDRESS 0x0034 + +#define MISC_IS_AXI_ERR_MASK 0x00000400 + +#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200 +#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100 +#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080 +#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040 +#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020 + +#define MISC_IS_ADDRESS 0x0038 + +#define SR_WR_INDEX_ADDRESS 0x003c + +#define DST_WR_INDEX_ADDRESS 0x0040 + +#define CURRENT_SRRI_ADDRESS 0x0044 + +#define CURRENT_DRRI_ADDRESS 0x0048 + +#define SRC_WATERMARK_LOW_MSB 31 +#define SRC_WATERMARK_LOW_LSB 16 +#define SRC_WATERMARK_LOW_MASK 0xffff0000 +#define SRC_WATERMARK_LOW_GET(x) \ + (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB) +#define SRC_WATERMARK_LOW_SET(x) \ + (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK) +#define SRC_WATERMARK_LOW_RESET 0 
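+ +/* + * The source watermark register packs the low mark into the upper and + * the high mark into the lower halfword; an illustrative write: + * + * val = SRC_WATERMARK_LOW_SET(low) | SRC_WATERMARK_HIGH_SET(high); + * ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, val); + */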
+#define SRC_WATERMARK_HIGH_MSB 15 +#define SRC_WATERMARK_HIGH_LSB 0 +#define SRC_WATERMARK_HIGH_MASK 0x0000ffff +#define SRC_WATERMARK_HIGH_GET(x) \ + (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB) +#define SRC_WATERMARK_HIGH_SET(x) \ + (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK) +#define SRC_WATERMARK_HIGH_RESET 0 +#define SRC_WATERMARK_ADDRESS 0x004c + +#define DST_WATERMARK_LOW_LSB 16 +#define DST_WATERMARK_LOW_MASK 0xffff0000 +#define DST_WATERMARK_LOW_SET(x) \ + (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK) +#define DST_WATERMARK_LOW_RESET 0 +#define DST_WATERMARK_HIGH_MSB 15 +#define DST_WATERMARK_HIGH_LSB 0 +#define DST_WATERMARK_HIGH_MASK 0x0000ffff +#define DST_WATERMARK_HIGH_GET(x) \ + (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB) +#define DST_WATERMARK_HIGH_SET(x) \ + (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK) +#define DST_WATERMARK_HIGH_RESET 0 +#define DST_WATERMARK_ADDRESS 0x0050 + + +static inline u32 ath10k_ce_base_address(unsigned int ce_id) +{ + return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; +} + +#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \ + HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \ + HOST_IS_DST_RING_LOW_WATERMARK_MASK | \ + HOST_IS_DST_RING_HIGH_WATERMARK_MASK) + +#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \ + MISC_IS_DST_ADDR_ERR_MASK | \ + MISC_IS_SRC_LEN_ERR_MASK | \ + MISC_IS_DST_MAX_LEN_VIO_MASK | \ + MISC_IS_DST_RING_OVERFLOW_MASK | \ + MISC_IS_SRC_RING_OVERFLOW_MASK) + +#define CE_SRC_RING_TO_DESC(baddr, idx) \ + (&(((struct ce_desc *)baddr)[idx])) + +#define CE_DEST_RING_TO_DESC(baddr, idx) \ + (&(((struct ce_desc *)baddr)[idx])) + +/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */ +#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \ + (((int)(toidx)-(int)(fromidx)) & (nentries_mask)) + +#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) + +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \ + (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ + CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 + +#define CE_INTERRUPT_SUMMARY(ar) \ + CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \ + ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \ + CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)) + +#endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c new file mode 100644 index 000000000000..2b3426b1ff3f --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -0,0 +1,665 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/firmware.h> + +#include "core.h" +#include "mac.h" +#include "htc.h" +#include "hif.h" +#include "wmi.h" +#include "bmi.h" +#include "debug.h" +#include "htt.h" + +unsigned int ath10k_debug_mask; +static bool uart_print; +static unsigned int ath10k_p2p; +module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); +module_param(uart_print, bool, 0644); +module_param_named(p2p, ath10k_p2p, uint, 0644); +MODULE_PARM_DESC(debug_mask, "Debugging mask"); +MODULE_PARM_DESC(uart_print, "Uart target debugging"); +MODULE_PARM_DESC(p2p, "Enable ath10k P2P support"); + +static const struct ath10k_hw_params ath10k_hw_params_list[] = { + { + .id = QCA988X_HW_1_0_VERSION, + .name = "qca988x hw1.0", + .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR, + .fw = { + .dir = QCA988X_HW_1_0_FW_DIR, + .fw = QCA988X_HW_1_0_FW_FILE, + .otp = QCA988X_HW_1_0_OTP_FILE, + .board = QCA988X_HW_1_0_BOARD_DATA_FILE, + }, + }, + { + .id = QCA988X_HW_2_0_VERSION, + .name = "qca988x hw2.0", + .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, + .fw = { + .dir = QCA988X_HW_2_0_FW_DIR, + .fw = QCA988X_HW_2_0_FW_FILE, + .otp = QCA988X_HW_2_0_OTP_FILE, + .board = QCA988X_HW_2_0_BOARD_DATA_FILE, + }, + }, +}; + +static void ath10k_send_suspend_complete(struct ath10k *ar) +{ + ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__); + + ar->is_target_paused = true; + wake_up(&ar->event_queue); +} + +static int ath10k_check_fw_version(struct ath10k *ar) +{ + char version[32]; + + if (ar->fw_version_major >= SUPPORTED_FW_MAJOR && + ar->fw_version_minor >= SUPPORTED_FW_MINOR && + ar->fw_version_release >= SUPPORTED_FW_RELEASE && + ar->fw_version_build >= SUPPORTED_FW_BUILD) + return 0; + + snprintf(version, sizeof(version), "%u.%u.%u.%u", + SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR, + SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD); + + ath10k_warn("WARNING: Firmware version %s is not officially supported.\n", + ar->hw->wiphy->fw_version); + ath10k_warn("Please upgrade to version %s (or newer)\n", version); + + return 0; +} + +static int ath10k_init_connect_htc(struct ath10k *ar) +{ + int status; + + status = ath10k_wmi_connect_htc_service(ar); + if (status) + goto conn_fail; + + /* Start HTC */ + status = ath10k_htc_start(ar->htc); + if (status) + goto conn_fail; + + /* Wait for WMI event to be ready */ + status = ath10k_wmi_wait_for_service_ready(ar); + if (status <= 0) { + ath10k_warn("wmi service ready event not received\n"); + status = -ETIMEDOUT; + goto timeout; + } + + ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n"); + return 0; + +timeout: + ath10k_htc_stop(ar->htc); +conn_fail: + return status; +} + +static int ath10k_init_configure_target(struct ath10k *ar) +{ + u32 param_host; + int ret; + + /* tell the target which HTC version is used */ + ret = ath10k_bmi_write32(ar, hi_app_host_interest, + HTC_PROTOCOL_VERSION); + if (ret) { + ath10k_err("setting HTC version failed\n"); + return ret; + } + + /* set the firmware mode to STA/IBSS/AP */ + ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host); + if (ret) { + ath10k_err("setting firmware mode (1/2) failed\n"); + return ret; + } + + /* TODO following parameters need to be re-visited.
*/ + /* num_device */ + param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT); + /* Firmware mode */ + /* FIXME: Why FW_MODE_AP ??.*/ + param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT); + /* mac_addr_method */ + param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT); + /* firmware_bridge */ + param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT); + /* fwsubmode */ + param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT); + + ret = ath10k_bmi_write32(ar, hi_option_flag, param_host); + if (ret) { + ath10k_err("setting firmware mode (2/2) failed\n"); + return ret; + } + + /* We do all byte-swapping on the host */ + ret = ath10k_bmi_write32(ar, hi_be, 0); + if (ret) { + ath10k_err("setting host CPU BE mode failed\n"); + return ret; + } + + /* FW descriptor/Data swap flags */ + ret = ath10k_bmi_write32(ar, hi_fw_swap, 0); + + if (ret) { + ath10k_err("setting FW data/desc swap flags failed\n"); + return ret; + } + + return 0; +} + +static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, + const char *dir, + const char *file) +{ + char filename[100]; + const struct firmware *fw; + int ret; + + if (file == NULL) + return ERR_PTR(-ENOENT); + + if (dir == NULL) + dir = "."; + + snprintf(filename, sizeof(filename), "%s/%s", dir, file); + ret = request_firmware(&fw, filename, ar->dev); + if (ret) + return ERR_PTR(ret); + + return fw; +} + +static int ath10k_push_board_ext_data(struct ath10k *ar, + const struct firmware *fw) +{ + u32 board_data_size = QCA988X_BOARD_DATA_SZ; + u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ; + u32 board_ext_data_addr; + int ret; + + ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr); + if (ret) { + ath10k_err("could not read board ext data addr (%d)\n", ret); + return ret; + } + + ath10k_dbg(ATH10K_DBG_CORE, + "ath10k: Board extended Data download addr: 0x%x\n", + board_ext_data_addr); + + if (board_ext_data_addr == 0) + return 0; + + if (fw->size != (board_data_size + board_ext_data_size)) { + ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n", + fw->size, board_data_size, board_ext_data_size); + return -EINVAL; + } + + ret = ath10k_bmi_write_memory(ar, board_ext_data_addr, + fw->data + board_data_size, + board_ext_data_size); + if (ret) { + ath10k_err("could not write board ext data (%d)\n", ret); + return ret; + } + + ret = ath10k_bmi_write32(ar, hi_board_ext_data_config, + (board_ext_data_size << 16) | 1); + if (ret) { + ath10k_err("could not write board ext data bit (%d)\n", ret); + return ret; + } + + return 0; +} + +static int ath10k_download_board_data(struct ath10k *ar) +{ + u32 board_data_size = QCA988X_BOARD_DATA_SZ; + u32 address; + const struct firmware *fw; + int ret; + + fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, + ar->hw_params.fw.board); + if (IS_ERR(fw)) { + ath10k_err("could not fetch board data fw file (%ld)\n", + PTR_ERR(fw)); + return PTR_ERR(fw); + } + + ret = ath10k_push_board_ext_data(ar, fw); + if (ret) { + ath10k_err("could not push board ext data (%d)\n", ret); + goto exit; + } + + ret = ath10k_bmi_read32(ar, hi_board_data, &address); + if (ret) { + ath10k_err("could not read board data addr (%d)\n", ret); + goto exit; + } + + ret = ath10k_bmi_write_memory(ar, address, fw->data, + min_t(u32, board_data_size, fw->size)); + if (ret) { + ath10k_err("could not write board data (%d)\n", ret); + goto exit; + } + + ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1); + if (ret) { + ath10k_err("could not write board data bit (%d)\n", ret); + goto exit; + } + +exit: + release_firmware(fw); + return 
ret; +} + +static int ath10k_download_and_run_otp(struct ath10k *ar) +{ + const struct firmware *fw; + u32 address; + u32 exec_param; + int ret; + + /* OTP is optional */ + + if (ar->hw_params.fw.otp == NULL) { + ath10k_info("otp file not defined\n"); + return 0; + } + + address = ar->hw_params.patch_load_addr; + + fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, + ar->hw_params.fw.otp); + if (IS_ERR(fw)) { + ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw)); + return 0; + } + + ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); + if (ret) { + ath10k_err("could not write otp (%d)\n", ret); + goto exit; + } + + exec_param = 0; + ret = ath10k_bmi_execute(ar, address, &exec_param); + if (ret) { + ath10k_err("could not execute otp (%d)\n", ret); + goto exit; + } + +exit: + release_firmware(fw); + return ret; +} + +static int ath10k_download_fw(struct ath10k *ar) +{ + const struct firmware *fw; + u32 address; + int ret; + + if (ar->hw_params.fw.fw == NULL) + return -EINVAL; + + address = ar->hw_params.patch_load_addr; + + fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, + ar->hw_params.fw.fw); + if (IS_ERR(fw)) { + ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw)); + return PTR_ERR(fw); + } + + ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); + if (ret) { + ath10k_err("could not write fw (%d)\n", ret); + goto exit; + } + +exit: + release_firmware(fw); + return ret; +} + +static int ath10k_init_download_firmware(struct ath10k *ar) +{ + int ret; + + ret = ath10k_download_board_data(ar); + if (ret) + return ret; + + ret = ath10k_download_and_run_otp(ar); + if (ret) + return ret; + + ret = ath10k_download_fw(ar); + if (ret) + return ret; + + return ret; +} + +static int ath10k_init_uart(struct ath10k *ar) +{ + int ret; + + /* + * Explicitly setting UART prints to zero as target turns it on + * based on scratch registers. 
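	 *
	 * Editor's note: the flow below is disable-first, then re-enable
	 * only when the uart_print module parameter is set:
	 *
	 *	ath10k_bmi_write32(ar, hi_serial_enable, 0);	always off
	 *	ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);	TX GPIO pin
	 *	ath10k_bmi_write32(ar, hi_serial_enable, 1);	back on
	 *
	 * The pin number 7 is presumably board-specific (an assumption;
	 * this patch hard-codes it without explanation).
	 *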
+ */ + ret = ath10k_bmi_write32(ar, hi_serial_enable, 0); + if (ret) { + ath10k_warn("could not disable UART prints (%d)\n", ret); + return ret; + } + + if (!uart_print) { + ath10k_info("UART prints disabled\n"); + return 0; + } + + ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7); + if (ret) { + ath10k_warn("could not enable UART prints (%d)\n", ret); + return ret; + } + + ret = ath10k_bmi_write32(ar, hi_serial_enable, 1); + if (ret) { + ath10k_warn("could not enable UART prints (%d)\n", ret); + return ret; + } + + ath10k_info("UART prints enabled\n"); + return 0; +} + +static int ath10k_init_hw_params(struct ath10k *ar) +{ + const struct ath10k_hw_params *uninitialized_var(hw_params); + int i; + + for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { + hw_params = &ath10k_hw_params_list[i]; + + if (hw_params->id == ar->target_version) + break; + } + + if (i == ARRAY_SIZE(ath10k_hw_params_list)) { + ath10k_err("Unsupported hardware version: 0x%x\n", + ar->target_version); + return -EINVAL; + } + + ar->hw_params = *hw_params; + + ath10k_info("Hardware name %s version 0x%x\n", + ar->hw_params.name, ar->target_version); + + return 0; +} + +struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, + enum ath10k_bus bus, + const struct ath10k_hif_ops *hif_ops) +{ + struct ath10k *ar; + + ar = ath10k_mac_create(); + if (!ar) + return NULL; + + ar->ath_common.priv = ar; + ar->ath_common.hw = ar->hw; + + ar->p2p = !!ath10k_p2p; + ar->dev = dev; + + ar->hif.priv = hif_priv; + ar->hif.ops = hif_ops; + ar->hif.bus = bus; + + ar->free_vdev_map = 0xFF; /* 8 vdevs */ + + init_completion(&ar->scan.started); + init_completion(&ar->scan.completed); + init_completion(&ar->scan.on_channel); + + init_completion(&ar->install_key_done); + init_completion(&ar->vdev_setup_done); + + setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar); + + ar->workqueue = create_singlethread_workqueue("ath10k_wq"); + if (!ar->workqueue) + goto err_wq; + + mutex_init(&ar->conf_mutex); + spin_lock_init(&ar->data_lock); + + INIT_LIST_HEAD(&ar->peers); + init_waitqueue_head(&ar->peer_mapping_wq); + + init_completion(&ar->offchan_tx_completed); + INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); + skb_queue_head_init(&ar->offchan_tx_queue); + + init_waitqueue_head(&ar->event_queue); + + return ar; + +err_wq: + ath10k_mac_destroy(ar); + return NULL; +} +EXPORT_SYMBOL(ath10k_core_create); + +void ath10k_core_destroy(struct ath10k *ar) +{ + flush_workqueue(ar->workqueue); + destroy_workqueue(ar->workqueue); + + ath10k_mac_destroy(ar); +} +EXPORT_SYMBOL(ath10k_core_destroy); + + +int ath10k_core_register(struct ath10k *ar) +{ + struct ath10k_htc_ops htc_ops; + struct bmi_target_info target_info; + int status; + + memset(&target_info, 0, sizeof(target_info)); + status = ath10k_bmi_get_target_info(ar, &target_info); + if (status) + goto err; + + ar->target_version = target_info.version; + ar->hw->wiphy->hw_version = target_info.version; + + status = ath10k_init_hw_params(ar); + if (status) + goto err; + + if (ath10k_init_configure_target(ar)) { + status = -EINVAL; + goto err; + } + + status = ath10k_init_download_firmware(ar); + if (status) + goto err; + + status = ath10k_init_uart(ar); + if (status) + goto err; + + htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete; + + ar->htc = ath10k_htc_create(ar, &htc_ops); + if (IS_ERR(ar->htc)) { + status = PTR_ERR(ar->htc); + ath10k_err("could not create HTC (%d)\n", status); + goto err; + } + + status = ath10k_bmi_done(ar); + if (status) + 
goto err_htc_destroy; + + status = ath10k_wmi_attach(ar); + if (status) { + ath10k_err("WMI attach failed: %d\n", status); + goto err_htc_destroy; + } + + status = ath10k_htc_wait_target(ar->htc); + if (status) + goto err_wmi_detach; + + ar->htt = ath10k_htt_attach(ar); + if (!ar->htt) { + status = -ENOMEM; + goto err_wmi_detach; + } + + status = ath10k_init_connect_htc(ar); + if (status) + goto err_htt_detach; + + ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version); + + status = ath10k_check_fw_version(ar); + if (status) + goto err_disconnect_htc; + + status = ath10k_wmi_cmd_init(ar); + if (status) { + ath10k_err("could not send WMI init command (%d)\n", status); + goto err_disconnect_htc; + } + + status = ath10k_wmi_wait_for_unified_ready(ar); + if (status <= 0) { + ath10k_err("wmi unified ready event not received\n"); + status = -ETIMEDOUT; + goto err_disconnect_htc; + } + + status = ath10k_htt_attach_target(ar->htt); + if (status) + goto err_disconnect_htc; + + status = ath10k_mac_register(ar); + if (status) + goto err_disconnect_htc; + + status = ath10k_debug_create(ar); + if (status) { + ath10k_err("unable to initialize debugfs\n"); + goto err_unregister_mac; + } + + return 0; + +err_unregister_mac: + ath10k_mac_unregister(ar); +err_disconnect_htc: + ath10k_htc_stop(ar->htc); +err_htt_detach: + ath10k_htt_detach(ar->htt); +err_wmi_detach: + ath10k_wmi_detach(ar); +err_htc_destroy: + ath10k_htc_destroy(ar->htc); +err: + return status; +} +EXPORT_SYMBOL(ath10k_core_register); + +void ath10k_core_unregister(struct ath10k *ar) +{ + /* We must unregister from mac80211 before we stop HTC and HIF. + * Otherwise we will fail to submit commands to FW and mac80211 will be + * unhappy about callback failures. */ + ath10k_mac_unregister(ar); + ath10k_htc_stop(ar->htc); + ath10k_htt_detach(ar->htt); + ath10k_wmi_detach(ar); + ath10k_htc_destroy(ar->htc); +} +EXPORT_SYMBOL(ath10k_core_unregister); + +int ath10k_core_target_suspend(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__); + + ret = ath10k_wmi_pdev_suspend_target(ar); + if (ret) + ath10k_warn("could not suspend target (%d)\n", ret); + + return ret; +} +EXPORT_SYMBOL(ath10k_core_target_suspend); + +int ath10k_core_target_resume(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__); + + ret = ath10k_wmi_pdev_resume_target(ar); + if (ret) + ath10k_warn("could not resume target (%d)\n", ret); + + return ret; +} +EXPORT_SYMBOL(ath10k_core_target_resume); + +MODULE_AUTHOR("Qualcomm Atheros"); +MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h new file mode 100644 index 000000000000..539336d1be4b --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _CORE_H_ +#define _CORE_H_ + +#include <linux/completion.h> +#include <linux/if_ether.h> +#include <linux/types.h> +#include <linux/pci.h> + +#include "htc.h" +#include "hw.h" +#include "targaddrs.h" +#include "wmi.h" +#include "../ath.h" +#include "../regd.h" + +#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) +#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) +#define WO(_f) ((_f##_OFFSET) >> 2) + +#define ATH10K_SCAN_ID 0 +#define WMI_READY_TIMEOUT (5 * HZ) +#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ) + +/* Antenna noise floor */ +#define ATH10K_DEFAULT_NOISE_FLOOR -95 + +struct ath10k; + +enum ath10k_bus { + ATH10K_BUS_PCI, +}; + +struct ath10k_skb_cb { + dma_addr_t paddr; + bool is_mapped; + bool is_aborted; + + struct { + u8 vdev_id; + u16 msdu_id; + u8 tid; + bool is_offchan; + bool is_conf; + bool discard; + bool no_ack; + u8 refcount; + struct sk_buff *txfrag; + struct sk_buff *msdu; + } __packed htt; + + /* 4 bytes left on 64bit arch */ +} __packed; + +static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) > + IEEE80211_TX_INFO_DRIVER_DATA_SIZE); + return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; +} + +static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb) +{ + if (ATH10K_SKB_CB(skb)->is_mapped) + return -EINVAL; + + ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr))) + return -EIO; + + ATH10K_SKB_CB(skb)->is_mapped = true; + return 0; +} + +static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb) +{ + if (!ATH10K_SKB_CB(skb)->is_mapped) + return -EINVAL; + + dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len, + DMA_TO_DEVICE); + ATH10K_SKB_CB(skb)->is_mapped = false; + return 0; +} + +static inline u32 host_interest_item_address(u32 item_offset) +{ + return QCA988X_HOST_INTEREST_ADDRESS + item_offset; +} + +struct ath10k_bmi { + bool done_sent; +}; + +struct ath10k_wmi { + enum ath10k_htc_ep_id eid; + struct completion service_ready; + struct completion unified_ready; + atomic_t pending_tx_count; + wait_queue_head_t wq; + + struct sk_buff_head wmi_event_list; + struct work_struct wmi_event_work; +}; + +struct ath10k_peer_stat { + u8 peer_macaddr[ETH_ALEN]; + u32 peer_rssi; + u32 peer_tx_rate; +}; + +struct ath10k_target_stats { + /* PDEV stats */ + s32 ch_noise_floor; + u32 tx_frame_count; + u32 rx_frame_count; + u32 rx_clear_count; + u32 cycle_count; + u32 phy_err_count; + u32 chan_tx_power; + + /* PDEV TX stats */ + s32 comp_queued; + s32 comp_delivered; + s32 msdu_enqued; + s32 mpdu_enqued; + s32 wmm_drop; + s32 local_enqued; + s32 local_freed; + s32 hw_queued; + s32 hw_reaped; + s32 underrun; + s32 tx_abort; + s32 mpdus_requed; + u32 tx_ko; + u32 data_rc; + u32 self_triggers; + u32 sw_retry_failure; + u32 illgl_rate_phy_err; + u32 pdev_cont_xretry; + u32 pdev_tx_timeout; + u32 pdev_resets; + u32 phy_underrun; + u32 txop_ovf; + + /* PDEV RX stats */ + s32 mid_ppdu_route_change; + s32 status_rcvd; + s32 r0_frags; + s32 r1_frags; + s32 r2_frags; + s32 r3_frags; + s32 htt_msdus; + s32 htt_mpdus; + s32 
loc_msdus; + s32 loc_mpdus; + s32 oversize_amsdu; + s32 phy_errs; + s32 phy_err_drop; + s32 mpdu_errs; + + /* VDEV STATS */ + + /* PEER STATS */ + u8 peers; + struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS]; + + /* TODO: Beacon filter stats */ + +}; + +#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */ + +struct ath10k_peer { + struct list_head list; + int vdev_id; + u8 addr[ETH_ALEN]; + DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS); + struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; +}; + +#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) + +struct ath10k_vif { + u32 vdev_id; + enum wmi_vdev_type vdev_type; + enum wmi_vdev_subtype vdev_subtype; + u32 beacon_interval; + u32 dtim_period; + + struct ath10k *ar; + struct ieee80211_vif *vif; + + struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; + u8 def_wep_key_index; + + u16 tx_seq_no; + + union { + struct { + u8 bssid[ETH_ALEN]; + u32 uapsd; + } sta; + struct { + /* 127 stations; wmi limit */ + u8 tim_bitmap[16]; + u8 tim_len; + u32 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + bool hidden_ssid; + /* P2P_IE with NoA attribute for P2P_GO case */ + u32 noa_len; + u8 *noa_data; + } ap; + struct { + u8 bssid[ETH_ALEN]; + } ibss; + } u; +}; + +struct ath10k_vif_iter { + u32 vdev_id; + struct ath10k_vif *arvif; +}; + +struct ath10k_debug { + struct dentry *debugfs_phy; + + struct ath10k_target_stats target_stats; + u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; + + struct completion event_stats_compl; +}; + +struct ath10k { + struct ath_common ath_common; + struct ieee80211_hw *hw; + struct device *dev; + u8 mac_addr[ETH_ALEN]; + + u32 target_version; + u8 fw_version_major; + u32 fw_version_minor; + u16 fw_version_release; + u16 fw_version_build; + u32 phy_capability; + u32 hw_min_tx_power; + u32 hw_max_tx_power; + u32 ht_cap_info; + u32 vht_cap_info; + + struct targetdef *targetdef; + struct hostdef *hostdef; + + bool p2p; + + struct { + void *priv; + enum ath10k_bus bus; + const struct ath10k_hif_ops *ops; + } hif; + + struct ath10k_wmi wmi; + + wait_queue_head_t event_queue; + bool is_target_paused; + + struct ath10k_bmi bmi; + + struct ath10k_htc *htc; + struct ath10k_htt *htt; + + struct ath10k_hw_params { + u32 id; + const char *name; + u32 patch_load_addr; + + struct ath10k_hw_params_fw { + const char *dir; + const char *fw; + const char *otp; + const char *board; + } fw; + } hw_params; + + struct { + struct completion started; + struct completion completed; + struct completion on_channel; + struct timer_list timeout; + bool is_roc; + bool in_progress; + bool aborting; + int vdev_id; + int roc_freq; + } scan; + + struct { + struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + } mac; + + /* should never be NULL; needed for regular htt rx */ + struct ieee80211_channel *rx_channel; + + /* valid during scan; needed for mgmt rx during scan */ + struct ieee80211_channel *scan_channel; + + int free_vdev_map; + int monitor_vdev_id; + bool monitor_enabled; + bool monitor_present; + unsigned int filter_flags; + + struct wmi_pdev_set_wmm_params_arg wmm_params; + struct completion install_key_done; + + struct completion vdev_setup_done; + + struct workqueue_struct *workqueue; + + /* prevents concurrent FW reconfiguration */ + struct mutex conf_mutex; + + /* protects shared structure data */ + spinlock_t data_lock; + + struct list_head peers; + wait_queue_head_t peer_mapping_wq; + + struct work_struct offchan_tx_work; + struct sk_buff_head offchan_tx_queue; + struct completion offchan_tx_completed; + struct 
sk_buff *offchan_tx_skb; + +#ifdef CONFIG_ATH10K_DEBUGFS + struct ath10k_debug debug; +#endif +}; + +struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, + enum ath10k_bus bus, + const struct ath10k_hif_ops *hif_ops); +void ath10k_core_destroy(struct ath10k *ar); + +int ath10k_core_register(struct ath10k *ar); +void ath10k_core_unregister(struct ath10k *ar); + +int ath10k_core_target_suspend(struct ath10k *ar); +int ath10k_core_target_resume(struct ath10k *ar); + +#endif /* _CORE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c new file mode 100644 index 000000000000..499034b873d1 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -0,0 +1,503 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/debugfs.h> + +#include "core.h" +#include "debug.h" + +static int ath10k_printk(const char *level, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + int rtn; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + rtn = printk("%sath10k: %pV", level, &vaf); + + va_end(args); + + return rtn; +} + +int ath10k_info(const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + int ret; + + va_start(args, fmt); + vaf.va = &args; + ret = ath10k_printk(KERN_INFO, "%pV", &vaf); + trace_ath10k_log_info(&vaf); + va_end(args); + + return ret; +} +EXPORT_SYMBOL(ath10k_info); + +int ath10k_err(const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + int ret; + + va_start(args, fmt); + vaf.va = &args; + ret = ath10k_printk(KERN_ERR, "%pV", &vaf); + trace_ath10k_log_err(&vaf); + va_end(args); + + return ret; +} +EXPORT_SYMBOL(ath10k_err); + +int ath10k_warn(const char *fmt, ...) 
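+/*
+ * Editor's note: unlike ath10k_info()/ath10k_err() above, the body below
+ * gates the console print with net_ratelimit() while the
+ * trace_ath10k_log_warn() tracepoint fires unconditionally. A roughly
+ * equivalent one-liner for the print half (a hypothetical alternative,
+ * not what this patch uses) would be:
+ *
+ *	pr_warn_ratelimited("ath10k: %pV", &vaf);
+ */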
+{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + int ret = 0; + + va_start(args, fmt); + vaf.va = &args; + + if (net_ratelimit()) + ret = ath10k_printk(KERN_WARNING, "%pV", &vaf); + + trace_ath10k_log_warn(&vaf); + + va_end(args); + + return ret; +} +EXPORT_SYMBOL(ath10k_warn); + +#ifdef CONFIG_ATH10K_DEBUGFS + +void ath10k_debug_read_service_map(struct ath10k *ar, + void *service_map, + size_t map_size) +{ + memcpy(ar->debug.wmi_service_bitmap, service_map, map_size); +} + +static ssize_t ath10k_read_wmi_services(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char *buf; + unsigned int len = 0, buf_len = 1500; + const char *status; + ssize_t ret_cnt; + int i; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&ar->conf_mutex); + + if (len > buf_len) + len = buf_len; + + for (i = 0; i < WMI_SERVICE_LAST; i++) { + if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i)) + status = "enabled"; + else + status = "disabled"; + + len += scnprintf(buf + len, buf_len - len, + "0x%02x - %20s - %s\n", + i, wmi_service_name(i), status); + } + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + mutex_unlock(&ar->conf_mutex); + + kfree(buf); + return ret_cnt; +} + +static const struct file_operations fops_wmi_services = { + .read = ath10k_read_wmi_services, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath10k_debug_read_target_stats(struct ath10k *ar, + struct wmi_stats_event *ev) +{ + u8 *tmp = ev->data; + struct ath10k_target_stats *stats; + int num_pdev_stats, num_vdev_stats, num_peer_stats; + struct wmi_pdev_stats *ps; + int i; + + mutex_lock(&ar->conf_mutex); + + stats = &ar->debug.target_stats; + + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */ + num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */ + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */ + + if (num_pdev_stats) { + ps = (struct wmi_pdev_stats *)tmp; + + stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf); + stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count); + stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count); + stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count); + stats->cycle_count = __le32_to_cpu(ps->cycle_count); + stats->phy_err_count = __le32_to_cpu(ps->phy_err_count); + stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr); + + stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued); + stats->comp_delivered = + __le32_to_cpu(ps->wal.tx.comp_delivered); + stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued); + stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued); + stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop); + stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued); + stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed); + stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued); + stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped); + stats->underrun = __le32_to_cpu(ps->wal.tx.underrun); + stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort); + stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed); + stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko); + stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc); + stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers); + stats->sw_retry_failure = + __le32_to_cpu(ps->wal.tx.sw_retry_failure); + stats->illgl_rate_phy_err = + __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err); + 
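+		/*
+		 * Editor's note: every counter in the WMI event payload is
+		 * little-endian on the wire, hence the __le32_to_cpu() on
+		 * each load; e.g. wire bytes 78 56 34 12 decode to
+		 * 0x12345678 on hosts of either endianness, and on
+		 * little-endian machines the conversion compiles away.
+		 */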
stats->pdev_cont_xretry = + __le32_to_cpu(ps->wal.tx.pdev_cont_xretry); + stats->pdev_tx_timeout = + __le32_to_cpu(ps->wal.tx.pdev_tx_timeout); + stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets); + stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun); + stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf); + + stats->mid_ppdu_route_change = + __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change); + stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd); + stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags); + stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags); + stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags); + stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags); + stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus); + stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus); + stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus); + stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus); + stats->oversize_amsdu = + __le32_to_cpu(ps->wal.rx.oversize_amsdu); + stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs); + stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop); + stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs); + + tmp += sizeof(struct wmi_pdev_stats); + } + + /* 0 or max vdevs */ + /* Currently firmware does not support VDEV stats */ + if (num_vdev_stats) { + struct wmi_vdev_stats *vdev_stats; + + for (i = 0; i < num_vdev_stats; i++) { + vdev_stats = (struct wmi_vdev_stats *)tmp; + tmp += sizeof(struct wmi_vdev_stats); + } + } + + if (num_peer_stats) { + struct wmi_peer_stats *peer_stats; + struct ath10k_peer_stat *s; + + stats->peers = num_peer_stats; + + for (i = 0; i < num_peer_stats; i++) { + peer_stats = (struct wmi_peer_stats *)tmp; + s = &stats->peer_stat[i]; + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr, + s->peer_macaddr); + s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi); + s->peer_tx_rate = + __le32_to_cpu(peer_stats->peer_tx_rate); + + tmp += sizeof(struct wmi_peer_stats); + } + } + + mutex_unlock(&ar->conf_mutex); + complete(&ar->debug.event_stats_compl); +} + +static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + struct ath10k_target_stats *fw_stats; + char *buf; + unsigned int len = 0, buf_len = 2500; + ssize_t ret_cnt; + long left; + int i; + int ret; + + fw_stats = &ar->debug.target_stats; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); + if (ret) { + ath10k_warn("could not request stats (%d)\n", ret); + kfree(buf); + return -EIO; + } + + left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ); + + if (left <= 0) { + kfree(buf); + return -ETIMEDOUT; + } + + mutex_lock(&ar->conf_mutex); + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s\n", + "ath10k PDEV stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Channel noise floor", fw_stats->ch_noise_floor); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Channel TX power", fw_stats->chan_tx_power); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "TX frame count", fw_stats->tx_frame_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RX frame count", fw_stats->rx_frame_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RX clear count", 
fw_stats->rx_clear_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Cycle count", fw_stats->cycle_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "PHY error count", fw_stats->phy_err_count);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n",
+			 "ath10k PDEV TX stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "=================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HTT cookies queued", fw_stats->comp_queued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HTT cookies disp.", fw_stats->comp_delivered);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDU queued", fw_stats->msdu_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU queued", fw_stats->mpdu_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs dropped", fw_stats->wmm_drop);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Local enqued", fw_stats->local_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Local freed", fw_stats->local_freed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HW queued", fw_stats->hw_queued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PPDUs reaped", fw_stats->hw_reaped);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Num underruns", fw_stats->underrun);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PPDUs cleaned", fw_stats->tx_abort);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs requeued", fw_stats->mpdus_requed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Excessive retries", fw_stats->tx_ko);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HW rate", fw_stats->data_rc);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Sched self triggers", fw_stats->self_triggers);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Dropped due to SW retries",
+			 fw_stats->sw_retry_failure);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Illegal rate phy errors",
+			 fw_stats->illgl_rate_phy_err);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PDEV continuous xretry", fw_stats->pdev_cont_xretry);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "TX timeout", fw_stats->pdev_tx_timeout);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PDEV resets", fw_stats->pdev_resets);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY underrun", fw_stats->phy_underrun);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU is more than txop limit", fw_stats->txop_ovf);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n",
+			 "ath10k PDEV RX stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "=================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Mid PPDU route change",
+			 fw_stats->mid_ppdu_route_change);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Tot. number of statuses", fw_stats->status_rcvd);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 0", fw_stats->r0_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 1", fw_stats->r1_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 2", fw_stats->r2_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 3", fw_stats->r3_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs delivered to HTT", fw_stats->htt_msdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs delivered to HTT", fw_stats->htt_mpdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs delivered to stack", fw_stats->loc_msdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs delivered to stack", fw_stats->loc_mpdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Oversized AMSDUs", fw_stats->oversize_amsdu);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY errors", fw_stats->phy_errs);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY error drops", fw_stats->phy_err_drop);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n",
+			 "ath10k PEER stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "=================");
+
+	for (i = 0; i < fw_stats->peers; i++) {
+		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+				 "Peer MAC address",
+				 fw_stats->peer_stat[i].peer_macaddr);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "Peer TX rate",
+				 fw_stats->peer_stat[i].peer_tx_rate);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	if (len > buf_len)
+		len = buf_len;
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	kfree(buf);
+	return ret_cnt;
+}
+
+static const struct file_operations fops_fw_stats = {
+	.read = ath10k_read_fw_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+int ath10k_debug_create(struct ath10k *ar)
+{
+	ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
+						   ar->hw->wiphy->debugfsdir);
+
+	if (!ar->debug.debugfs_phy)
+		return -ENOMEM;
+
+	init_completion(&ar->debug.event_stats_compl);
+
+	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
+			    &fops_fw_stats);
+
+	debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
+			    &fops_wmi_services);
+
+	return 0;
+}
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
+{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (ath10k_debug_mask & mask) + ath10k_printk(KERN_DEBUG, "%pV", &vaf); + + trace_ath10k_log_dbg(mask, &vaf); + + va_end(args); +} +EXPORT_SYMBOL(ath10k_dbg); + +void ath10k_dbg_dump(enum ath10k_debug_mask mask, + const char *msg, const char *prefix, + const void *buf, size_t len) +{ + if (ath10k_debug_mask & mask) { + if (msg) + ath10k_dbg(mask, "%s\n", msg); + + print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); + } + + /* tracing code doesn't like null strings :/ */ + trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "", + buf, len); +} +EXPORT_SYMBOL(ath10k_dbg_dump); + +#endif /* CONFIG_ATH10K_DEBUG */ diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h new file mode 100644 index 000000000000..168140c54028 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DEBUG_H_ +#define _DEBUG_H_ + +#include <linux/types.h> +#include "trace.h" + +enum ath10k_debug_mask { + ATH10K_DBG_PCI = 0x00000001, + ATH10K_DBG_WMI = 0x00000002, + ATH10K_DBG_HTC = 0x00000004, + ATH10K_DBG_HTT = 0x00000008, + ATH10K_DBG_MAC = 0x00000010, + ATH10K_DBG_CORE = 0x00000020, + ATH10K_DBG_PCI_DUMP = 0x00000040, + ATH10K_DBG_HTT_DUMP = 0x00000080, + ATH10K_DBG_MGMT = 0x00000100, + ATH10K_DBG_DATA = 0x00000200, + ATH10K_DBG_ANY = 0xffffffff, +}; + +extern unsigned int ath10k_debug_mask; + +extern __printf(1, 2) int ath10k_info(const char *fmt, ...); +extern __printf(1, 2) int ath10k_err(const char *fmt, ...); +extern __printf(1, 2) int ath10k_warn(const char *fmt, ...); + +#ifdef CONFIG_ATH10K_DEBUGFS +int ath10k_debug_create(struct ath10k *ar); +void ath10k_debug_read_service_map(struct ath10k *ar, + void *service_map, + size_t map_size); +void ath10k_debug_read_target_stats(struct ath10k *ar, + struct wmi_stats_event *ev); + +#else +static inline int ath10k_debug_create(struct ath10k *ar) +{ + return 0; +} + +static inline void ath10k_debug_read_service_map(struct ath10k *ar, + void *service_map, + size_t map_size) +{ +} + +static inline void ath10k_debug_read_target_stats(struct ath10k *ar, + struct wmi_stats_event *ev) +{ +} +#endif /* CONFIG_ATH10K_DEBUGFS */ + +#ifdef CONFIG_ATH10K_DEBUG +extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask, + const char *fmt, ...); +void ath10k_dbg_dump(enum ath10k_debug_mask mask, + const char *msg, const char *prefix, + const void *buf, size_t len); +#else /* CONFIG_ATH10K_DEBUG */ + +static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask, + const char *fmt, ...) 
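+/*
+ * Editor's note: with CONFIG_ATH10K_DEBUG unset this stub compiles every
+ * ath10k_dbg() call down to "return 0" with no ath10k_debug_mask test;
+ * note that only the debug build's prototype above carries the
+ * __printf(2, 3) annotation, so format/argument mismatches are caught
+ * only in debug builds.
+ */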
+{ + return 0; +} + +static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask, + const char *msg, const char *prefix, + const void *buf, size_t len) +{ +} +#endif /* CONFIG_ATH10K_DEBUG */ +#endif /* _DEBUG_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h new file mode 100644 index 000000000000..73a24d44d1b4 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HIF_H_ +#define _HIF_H_ + +#include <linux/kernel.h> +#include "core.h" + +struct ath10k_hif_cb { + int (*tx_completion)(struct ath10k *ar, + struct sk_buff *wbuf, + unsigned transfer_id); + int (*rx_completion)(struct ath10k *ar, + struct sk_buff *wbuf, + u8 pipe_id); +}; + +struct ath10k_hif_ops { + /* Send the head of a buffer to HIF for transmission to the target. */ + int (*send_head)(struct ath10k *ar, u8 pipe_id, + unsigned int transfer_id, + unsigned int nbytes, + struct sk_buff *buf); + + /* + * API to handle HIF-specific BMI message exchanges, this API is + * synchronous and only allowed to be called from a context that + * can block (sleep) + */ + int (*exchange_bmi_msg)(struct ath10k *ar, + void *request, u32 request_len, + void *response, u32 *response_len); + + int (*start)(struct ath10k *ar); + + void (*stop)(struct ath10k *ar); + + int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe, + int *ul_is_polled, int *dl_is_polled); + + void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe); + + /* + * Check if prior sends have completed. + * + * Check whether the pipe in question has any completed + * sends that have not yet been processed. + * This function is only relevant for HIF pipes that are configured + * to be polled rather than interrupt-driven. 
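+	 *
+	 * Editor's note: HTC records the polled/interrupt decision per
+	 * endpoint (ul_is_polled, reported by map_service_to_pipe) and
+	 * drives this hook from its send loop, e.g.:
+	 *
+	 *	if (ep->ul_is_polled)
+	 *		ath10k_hif_send_complete_check(ar, ep->ul_pipe_id,
+	 *					       force);
+	 *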
+ */ + void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force); + + void (*init)(struct ath10k *ar, + struct ath10k_hif_cb *callbacks); + + u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id); +}; + + +static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id, + unsigned int transfer_id, + unsigned int nbytes, + struct sk_buff *buf) +{ + return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf); +} + +static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar, + void *request, u32 request_len, + void *response, u32 *response_len) +{ + return ar->hif.ops->exchange_bmi_msg(ar, request, request_len, + response, response_len); +} + +static inline int ath10k_hif_start(struct ath10k *ar) +{ + return ar->hif.ops->start(ar); +} + +static inline void ath10k_hif_stop(struct ath10k *ar) +{ + return ar->hif.ops->stop(ar); +} + +static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar, + u16 service_id, + u8 *ul_pipe, u8 *dl_pipe, + int *ul_is_polled, + int *dl_is_polled) +{ + return ar->hif.ops->map_service_to_pipe(ar, service_id, + ul_pipe, dl_pipe, + ul_is_polled, dl_is_polled); +} + +static inline void ath10k_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe); +} + +static inline void ath10k_hif_send_complete_check(struct ath10k *ar, + u8 pipe_id, int force) +{ + ar->hif.ops->send_complete_check(ar, pipe_id, force); +} + +static inline void ath10k_hif_init(struct ath10k *ar, + struct ath10k_hif_cb *callbacks) +{ + ar->hif.ops->init(ar, callbacks); +} + +static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar, + u8 pipe_id) +{ + return ar->hif.ops->get_free_queue_number(ar, pipe_id); +} + +#endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c new file mode 100644 index 000000000000..74363c949392 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -0,0 +1,1000 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "core.h" +#include "hif.h" +#include "debug.h" + +/********/ +/* Send */ +/********/ + +static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep, + int force) +{ + /* + * Check whether HIF has any prior sends that have finished, + * have not had the post-processing done. 
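+	 *
+	 * Editor's note: for orientation, the TX path in this file is, in
+	 * order (a sketch of the call chain below):
+	 *
+	 *	ath10k_htc_send()                 reserve header, queue, kick work
+	 *	ath10k_htc_send_work()            dequeue under credit check
+	 *	ath10k_htc_issue_skb()            fill header, DMA-map, HIF send
+	 *	ath10k_htc_notify_tx_completion() unmap, call ep_tx_complete()
+	 *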
+ */ + ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force); +} + +static void ath10k_htc_control_tx_complete(struct ath10k *ar, + struct sk_buff *skb) +{ + kfree_skb(skb); +} + +static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) +{ + struct sk_buff *skb; + struct ath10k_skb_cb *skb_cb; + + skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE); + if (!skb) { + ath10k_warn("Unable to allocate ctrl skb\n"); + return NULL; + } + + skb_reserve(skb, 20); /* FIXME: why 20 bytes? */ + WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); + + skb_cb = ATH10K_SKB_CB(skb); + memset(skb_cb, 0, sizeof(*skb_cb)); + + ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); + return skb; +} + +static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc, + struct sk_buff *skb) +{ + ath10k_skb_unmap(htc->ar->dev, skb); + skb_pull(skb, sizeof(struct ath10k_htc_hdr)); +} + +static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, + ep->eid, skb); + + ath10k_htc_restore_tx_skb(ep->htc, skb); + + if (!ep->ep_ops.ep_tx_complete) { + ath10k_warn("no tx handler for eid %d\n", ep->eid); + dev_kfree_skb_any(skb); + return; + } + + ep->ep_ops.ep_tx_complete(ep->htc->ar, skb); +} + +/* assumes tx_lock is held */ +static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep) +{ + if (!ep->tx_credit_flow_enabled) + return false; + if (ep->tx_credits >= ep->tx_credits_per_max_message) + return false; + + ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n", + ep->eid); + return true; +} + +static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, + struct sk_buff *skb) +{ + struct ath10k_htc_hdr *hdr; + + hdr = (struct ath10k_htc_hdr *)skb->data; + memset(hdr, 0, sizeof(*hdr)); + + hdr->eid = ep->eid; + hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr)); + + spin_lock_bh(&ep->htc->tx_lock); + hdr->seq_no = ep->seq_no++; + + if (ath10k_htc_ep_need_credit_update(ep)) + hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE; + + spin_unlock_bh(&ep->htc->tx_lock); +} + +static int ath10k_htc_issue_skb(struct ath10k_htc *htc, + struct ath10k_htc_ep *ep, + struct sk_buff *skb, + u8 credits) +{ + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); + int ret; + + ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, + ep->eid, skb); + + ath10k_htc_prepare_tx_skb(ep, skb); + + ret = ath10k_skb_map(htc->ar->dev, skb); + if (ret) + goto err; + + ret = ath10k_hif_send_head(htc->ar, + ep->ul_pipe_id, + ep->eid, + skb->len, + skb); + if (unlikely(ret)) + goto err; + + return 0; +err: + ath10k_warn("HTC issue failed: %d\n", ret); + + spin_lock_bh(&htc->tx_lock); + ep->tx_credits += credits; + spin_unlock_bh(&htc->tx_lock); + + /* this is the simplest way to handle out-of-resources for non-credit + * based endpoints. 
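+	 *
+	 * Editor's note: the per-skb credit cost computed in
+	 * ath10k_htc_get_skb_credit_based() below is a plain round-up
+	 * division, i.e. equivalent to:
+	 *
+	 *	credits_required = DIV_ROUND_UP(transfer_len,
+	 *					htc->target_credit_size);
+	 *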
credit based endpoints can still get -ENOSR, but + * this is highly unlikely as credit reservation should prevent that */ + if (ret == -ENOSR) { + spin_lock_bh(&htc->tx_lock); + __skb_queue_head(&ep->tx_queue, skb); + spin_unlock_bh(&htc->tx_lock); + + return ret; + } + + skb_cb->is_aborted = true; + ath10k_htc_notify_tx_completion(ep, skb); + + return ret; +} + +static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc, + struct ath10k_htc_ep *ep, + u8 *credits) +{ + struct sk_buff *skb; + struct ath10k_skb_cb *skb_cb; + int credits_required; + int remainder; + unsigned int transfer_len; + + lockdep_assert_held(&htc->tx_lock); + + skb = __skb_dequeue(&ep->tx_queue); + if (!skb) + return NULL; + + skb_cb = ATH10K_SKB_CB(skb); + transfer_len = skb->len; + + if (likely(transfer_len <= htc->target_credit_size)) { + credits_required = 1; + } else { + /* figure out how many credits this message requires */ + credits_required = transfer_len / htc->target_credit_size; + remainder = transfer_len % htc->target_credit_size; + + if (remainder) + credits_required++; + } + + ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n", + credits_required, ep->tx_credits); + + if (ep->tx_credits < credits_required) { + __skb_queue_head(&ep->tx_queue, skb); + return NULL; + } + + ep->tx_credits -= credits_required; + *credits = credits_required; + return skb; +} + +static void ath10k_htc_send_work(struct work_struct *work) +{ + struct ath10k_htc_ep *ep = container_of(work, + struct ath10k_htc_ep, send_work); + struct ath10k_htc *htc = ep->htc; + struct sk_buff *skb; + u8 credits = 0; + int ret; + + while (true) { + if (ep->ul_is_polled) + ath10k_htc_send_complete_check(ep, 0); + + spin_lock_bh(&htc->tx_lock); + if (ep->tx_credit_flow_enabled) + skb = ath10k_htc_get_skb_credit_based(htc, ep, + &credits); + else + skb = __skb_dequeue(&ep->tx_queue); + spin_unlock_bh(&htc->tx_lock); + + if (!skb) + break; + + ret = ath10k_htc_issue_skb(htc, ep, skb, credits); + if (ret == -ENOSR) + break; + } +} + +int ath10k_htc_send(struct ath10k_htc *htc, + enum ath10k_htc_ep_id eid, + struct sk_buff *skb) +{ + struct ath10k_htc_ep *ep = &htc->endpoint[eid]; + + if (eid >= ATH10K_HTC_EP_COUNT) { + ath10k_warn("Invalid endpoint id: %d\n", eid); + return -ENOENT; + } + + skb_push(skb, sizeof(struct ath10k_htc_hdr)); + + spin_lock_bh(&htc->tx_lock); + __skb_queue_tail(&ep->tx_queue, skb); + spin_unlock_bh(&htc->tx_lock); + + queue_work(htc->ar->workqueue, &ep->send_work); + return 0; +} + +static int ath10k_htc_tx_completion_handler(struct ath10k *ar, + struct sk_buff *skb, + unsigned int eid) +{ + struct ath10k_htc *htc = ar->htc; + struct ath10k_htc_ep *ep = &htc->endpoint[eid]; + bool stopping; + + ath10k_htc_notify_tx_completion(ep, skb); + /* the skb now belongs to the completion handler */ + + spin_lock_bh(&htc->tx_lock); + stopping = htc->stopping; + spin_unlock_bh(&htc->tx_lock); + + if (!ep->tx_credit_flow_enabled && !stopping) + /* + * note: when using TX credit flow, the re-checking of + * queues happens when credits flow back from the target. 
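+		 *
+		 * Editor's note: those credits return inside an
+		 * ATH10K_HTC_RECORD_CREDITS trailer on a later rx frame;
+		 * ath10k_htc_process_credit_report() then does, per report:
+		 *
+		 *	ep->tx_credits += report->credits;
+		 *	if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
+		 *		queue_work(htc->ar->workqueue, &ep->send_work);
+		 *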
+ * in the non-TX credit case, we recheck after the packet + * completes + */ + queue_work(ar->workqueue, &ep->send_work); + + return 0; +} + +/* flush endpoint TX queue */ +static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc, + struct ath10k_htc_ep *ep) +{ + struct sk_buff *skb; + struct ath10k_skb_cb *skb_cb; + + spin_lock_bh(&htc->tx_lock); + for (;;) { + skb = __skb_dequeue(&ep->tx_queue); + if (!skb) + break; + + skb_cb = ATH10K_SKB_CB(skb); + skb_cb->is_aborted = true; + ath10k_htc_notify_tx_completion(ep, skb); + } + spin_unlock_bh(&htc->tx_lock); + + cancel_work_sync(&ep->send_work); +} + +/***********/ +/* Receive */ +/***********/ + +static void +ath10k_htc_process_credit_report(struct ath10k_htc *htc, + const struct ath10k_htc_credit_report *report, + int len, + enum ath10k_htc_ep_id eid) +{ + struct ath10k_htc_ep *ep; + int i, n_reports; + + if (len % sizeof(*report)) + ath10k_warn("Uneven credit report len %d", len); + + n_reports = len / sizeof(*report); + + spin_lock_bh(&htc->tx_lock); + for (i = 0; i < n_reports; i++, report++) { + if (report->eid >= ATH10K_HTC_EP_COUNT) + break; + + ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n", + report->eid, report->credits); + + ep = &htc->endpoint[report->eid]; + ep->tx_credits += report->credits; + + if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue)) + queue_work(htc->ar->workqueue, &ep->send_work); + } + spin_unlock_bh(&htc->tx_lock); +} + +static int ath10k_htc_process_trailer(struct ath10k_htc *htc, + u8 *buffer, + int length, + enum ath10k_htc_ep_id src_eid) +{ + int status = 0; + struct ath10k_htc_record *record; + u8 *orig_buffer; + int orig_length; + size_t len; + + orig_buffer = buffer; + orig_length = length; + + while (length > 0) { + record = (struct ath10k_htc_record *)buffer; + + if (length < sizeof(record->hdr)) { + status = -EINVAL; + break; + } + + if (record->hdr.len > length) { + /* no room left in buffer for record */ + ath10k_warn("Invalid record length: %d\n", + record->hdr.len); + status = -EINVAL; + break; + } + + switch (record->hdr.id) { + case ATH10K_HTC_RECORD_CREDITS: + len = sizeof(struct ath10k_htc_credit_report); + if (record->hdr.len < len) { + ath10k_warn("Credit report too long\n"); + status = -EINVAL; + break; + } + ath10k_htc_process_credit_report(htc, + record->credit_report, + record->hdr.len, + src_eid); + break; + default: + ath10k_warn("Unhandled record: id:%d length:%d\n", + record->hdr.id, record->hdr.len); + break; + } + + if (status) + break; + + /* multiple records may be present in a trailer */ + buffer += sizeof(record->hdr) + record->hdr.len; + length -= sizeof(record->hdr) + record->hdr.len; + } + + if (status) + ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "", + orig_buffer, orig_length); + + return status; +} + +static int ath10k_htc_rx_completion_handler(struct ath10k *ar, + struct sk_buff *skb, + u8 pipe_id) +{ + int status = 0; + struct ath10k_htc *htc = ar->htc; + struct ath10k_htc_hdr *hdr; + struct ath10k_htc_ep *ep; + u16 payload_len; + u32 trailer_len = 0; + size_t min_len; + u8 eid; + bool trailer_present; + + hdr = (struct ath10k_htc_hdr *)skb->data; + skb_pull(skb, sizeof(*hdr)); + + eid = hdr->eid; + + if (eid >= ATH10K_HTC_EP_COUNT) { + ath10k_warn("HTC Rx: invalid eid %d\n", eid); + ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "", + hdr, sizeof(*hdr)); + status = -EINVAL; + goto out; + } + + ep = &htc->endpoint[eid]; + + /* + * If this endpoint that received a message from the target has + * a to-target HIF pipe whose send completions 
are polled rather + * than interrupt-driven, this is a good point to ask HIF to check + * whether it has any completed sends to handle. + */ + if (ep->ul_is_polled) + ath10k_htc_send_complete_check(ep, 1); + + payload_len = __le16_to_cpu(hdr->len); + + if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) { + ath10k_warn("HTC rx frame too long, len: %zu\n", + payload_len + sizeof(*hdr)); + ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "", + hdr, sizeof(*hdr)); + status = -EINVAL; + goto out; + } + + if (skb->len < payload_len) { + ath10k_dbg(ATH10K_DBG_HTC, + "HTC Rx: insufficient length, got %d, expected %d\n", + skb->len, payload_len); + ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", + "", hdr, sizeof(*hdr)); + status = -EINVAL; + goto out; + } + + /* get flags to check for trailer */ + trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT; + if (trailer_present) { + u8 *trailer; + + trailer_len = hdr->trailer_len; + min_len = sizeof(struct ath10k_ath10k_htc_record_hdr); + + if ((trailer_len < min_len) || + (trailer_len > payload_len)) { + ath10k_warn("Invalid trailer length: %d\n", + trailer_len); + status = -EPROTO; + goto out; + } + + trailer = (u8 *)hdr; + trailer += sizeof(*hdr); + trailer += payload_len; + trailer -= trailer_len; + status = ath10k_htc_process_trailer(htc, trailer, + trailer_len, hdr->eid); + if (status) + goto out; + + skb_trim(skb, skb->len - trailer_len); + } + + if (((int)payload_len - (int)trailer_len) <= 0) + /* zero length packet with trailer data, just drop these */ + goto out; + + if (eid == ATH10K_HTC_EP_0) { + struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data; + + switch (__le16_to_cpu(msg->hdr.message_id)) { + default: + /* handle HTC control message */ + if (completion_done(&htc->ctl_resp)) { + /* + * this is a fatal error, target should not be + * sending unsolicited messages on the ep 0 + */ + ath10k_warn("HTC rx ctrl still processing\n"); + status = -EINVAL; + complete(&htc->ctl_resp); + goto out; + } + + htc->control_resp_len = + min_t(int, skb->len, + ATH10K_HTC_MAX_CTRL_MSG_LEN); + + memcpy(htc->control_resp_buffer, skb->data, + htc->control_resp_len); + + complete(&htc->ctl_resp); + break; + case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE: + htc->htc_ops.target_send_suspend_complete(ar); + } + goto out; + } + + ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", + eid, skb); + ep->ep_ops.ep_rx_complete(ar, skb); + + /* skb is now owned by the rx completion handler */ + skb = NULL; +out: + kfree_skb(skb); + + return status; +} + +static void ath10k_htc_control_rx_complete(struct ath10k *ar, + struct sk_buff *skb) +{ + /* This is unexpected. FW is not supposed to send regular rx on this + * endpoint. 
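+	 *
+	 * Editor's note: EP0 control messages (HTC READY and service
+	 * connect responses) are intercepted in
+	 * ath10k_htc_rx_completion_handler() above and complete
+	 * htc->ctl_resp there, so reaching this ep_rx_complete hook for
+	 * EP0 means the target sent something unsolicited.
+	 *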
+ */
+	ath10k_warn("unexpected htc rx\n");
+	kfree_skb(skb);
+}
+
+/***************/
+/* Init/Deinit */
+/***************/
+
+static const char *htc_service_name(enum ath10k_htc_svc_id id)
+{
+	switch (id) {
+	case ATH10K_HTC_SVC_ID_RESERVED:
+		return "Reserved";
+	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+		return "Control";
+	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+		return "WMI";
+	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+		return "DATA BE";
+	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+		return "DATA BK";
+	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+		return "DATA VI";
+	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+		return "DATA VO";
+	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
+		return "NMI Control";
+	case ATH10K_HTC_SVC_ID_NMI_DATA:
+		return "NMI Data";
+	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+		return "HTT Data";
+	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+		return "RAW";
+	}
+
+	return "Unknown";
+}
+
+static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
+{
+	struct ath10k_htc_ep *ep;
+	int i;
+
+	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+		ep = &htc->endpoint[i];
+		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
+		ep->max_ep_message_len = 0;
+		ep->max_tx_queue_depth = 0;
+		ep->eid = i;
+		skb_queue_head_init(&ep->tx_queue);
+		ep->htc = htc;
+		ep->tx_credit_flow_enabled = true;
+		INIT_WORK(&ep->send_work, ath10k_htc_send_work);
+	}
+}
+
+static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
+{
+	struct ath10k_htc_svc_tx_credits *entry;
+
+	entry = &htc->service_tx_alloc[0];
+
+	/*
+	 * For PCIe, allocate all credits/HTC buffers to WMI.
+	 * No buffers are used/required for data; data always
+	 * remains on the host.
+	 */
+	entry++;
+	entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+	entry->credit_allocation = htc->total_transmit_credits;
+}
+
+static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
+					   u16 service_id)
+{
+	u8 allocation = 0;
+	int i;
+
+	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
+		if (htc->service_tx_alloc[i].service_id == service_id)
+			allocation =
+				htc->service_tx_alloc[i].credit_allocation;
+	}
+
+	return allocation;
+}
+
+int ath10k_htc_wait_target(struct ath10k_htc *htc)
+{
+	int status = 0;
+	struct ath10k_htc_svc_conn_req conn_req;
+	struct ath10k_htc_svc_conn_resp conn_resp;
+	struct ath10k_htc_msg *msg;
+	u16 message_id;
+	u16 credit_count;
+	u16 credit_size;
+
+	INIT_COMPLETION(htc->ctl_resp);
+
+	status = ath10k_hif_start(htc->ar);
+	if (status) {
+		ath10k_err("could not start HIF (%d)\n", status);
+		goto err_start;
+	}
+
+	status = wait_for_completion_timeout(&htc->ctl_resp,
+					     ATH10K_HTC_WAIT_TIMEOUT_HZ);
+	if (status <= 0) {
+		if (status == 0)
+			status = -ETIMEDOUT;
+
+		ath10k_err("ctl_resp never came in (%d)\n", status);
+		goto err_target;
+	}
+
+	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
+		ath10k_err("Invalid HTC ready msg len: %d\n",
+			   htc->control_resp_len);
+
+		status = -ECOMM;
+		goto err_target;
+	}
+
+	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+	message_id = __le16_to_cpu(msg->hdr.message_id);
+	credit_count = __le16_to_cpu(msg->ready.credit_count);
+	credit_size = __le16_to_cpu(msg->ready.credit_size);
+
+	if (message_id != ATH10K_HTC_MSG_READY_ID) {
+		ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
+		status = -ECOMM;
+		goto err_target;
+	}
+
+	htc->total_transmit_credits = credit_count;
+	htc->target_credit_size = credit_size;
+
+	ath10k_dbg(ATH10K_DBG_HTC,
+		   "Target ready! transmit resources: %d size: %d\n",
+		   htc->total_transmit_credits,
+		   htc->target_credit_size);
+
+	if ((htc->total_transmit_credits == 0) ||
+	    (htc->target_credit_size == 0)) {
+		status = -ECOMM;
+		ath10k_err("Invalid credit count or credit size received\n");
+		goto err_target;
+	}
+
+	ath10k_htc_setup_target_buffer_assignments(htc);
+
+	/* setup our pseudo HTC control endpoint connection */
+	memset(&conn_req, 0, sizeof(conn_req));
+	memset(&conn_resp, 0, sizeof(conn_resp));
+	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
+	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
+	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
+	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
+
+	/* connect fake service */
+	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
+	if (status) {
+		ath10k_err("could not connect to htc service (%d)\n", status);
+		goto err_target;
+	}
+
+	return 0;
+err_target:
+	ath10k_hif_stop(htc->ar);
+err_start:
+	return status;
+}
+
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+			       struct ath10k_htc_svc_conn_req *conn_req,
+			       struct ath10k_htc_svc_conn_resp *conn_resp)
+{
+	struct ath10k_htc_msg *msg;
+	struct ath10k_htc_conn_svc *req_msg;
+	struct ath10k_htc_conn_svc_response resp_msg_dummy;
+	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
+	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
+	struct ath10k_htc_ep *ep;
+	struct sk_buff *skb;
+	unsigned int max_msg_size = 0;
+	int length, status;
+	bool disable_credit_flow_ctrl = false;
+	u16 message_id, service_id, flags = 0;
+	u8 tx_alloc = 0;
+
+	/* special case for HTC pseudo control service */
+	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
+		disable_credit_flow_ctrl = true;
+		assigned_eid = ATH10K_HTC_EP_0;
+		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
+		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+		goto setup;
+	}
+
+	tx_alloc = ath10k_htc_get_credit_allocation(htc,
+						    conn_req->service_id);
+	if (!tx_alloc)
+		ath10k_warn("HTC Service %s does not allocate target credits\n",
+			    htc_service_name(conn_req->service_id));
+
+	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+	if (!skb) {
+		ath10k_err("Failed to allocate HTC packet\n");
+		return -ENOMEM;
+	}
+
+	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
+	skb_put(skb, length);
+	memset(skb->data, 0, length);
+
+	msg = (struct ath10k_htc_msg *)skb->data;
+	msg->hdr.message_id =
+		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
+
+	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
+
+	/* Only enable credit flow control for the WMI ctrl service. Note
+	 * that the flags must be fully assembled before they are copied
+	 * into the request message below. */
+	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
+		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+		disable_credit_flow_ctrl = true;
+	}
+
+	req_msg = &msg->connect_service;
+	req_msg->flags = __cpu_to_le16(flags);
+	req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
+	INIT_COMPLETION(htc->ctl_resp);
+
+	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+	if (status) {
+		kfree_skb(skb);
+		return status;
+	}
+
+	/* wait for response */
+	status = wait_for_completion_timeout(&htc->ctl_resp,
+					     ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+	if (status <= 0) {
+		if (status == 0)
+			status = -ETIMEDOUT;
+		ath10k_err("Service connect timeout: %d\n", status);
+		return status;
+	}
+
+	/* we controlled the buffer creation, it's aligned */
+	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+	resp_msg = &msg->connect_service_response;
+	message_id = __le16_to_cpu(msg->hdr.message_id);
+	service_id = __le16_to_cpu(resp_msg->service_id);
+
+	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+	    (htc->control_resp_len < sizeof(msg->hdr) +
+	     sizeof(msg->connect_service_response))) {
+		ath10k_err("Invalid resp message ID 0x%x\n", message_id);
+		return -EPROTO;
+	}
+
+	ath10k_dbg(ATH10K_DBG_HTC,
+		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
+		   htc_service_name(service_id),
+		   resp_msg->status, resp_msg->eid);
+
+	conn_resp->connect_resp_code = resp_msg->status;
+
+	/* check response status */
+	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
+		ath10k_err("HTC Service %s connect request failed: 0x%x\n",
+			   htc_service_name(service_id),
+			   resp_msg->status);
+		return -EPROTO;
+	}
+
+	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
+	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
+
+setup:
+
+	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
+		return -EPROTO;
+
+	if (max_msg_size == 0)
+		return -EPROTO;
+
+	ep = &htc->endpoint[assigned_eid];
+	ep->eid = assigned_eid;
+
+	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
+		return -EPROTO;
+
+	/* return assigned endpoint to caller; use the validated local
+	 * max_msg_size so that the pseudo control endpoint, which never
+	 * gets a response message, is handled correctly too */
+	conn_resp->eid = assigned_eid;
+	conn_resp->max_msg_len = max_msg_size;
+
+	/* setup the endpoint */
+	ep->service_id = conn_req->service_id;
+	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+	ep->max_ep_message_len = max_msg_size;
+	ep->tx_credits = tx_alloc;
+	ep->tx_credit_size = htc->target_credit_size;
+	ep->tx_credits_per_max_message = ep->max_ep_message_len /
+					 htc->target_credit_size;
+
+	if (ep->max_ep_message_len % htc->target_credit_size)
+		ep->tx_credits_per_max_message++;
+
+	/* copy all the callbacks */
+	ep->ep_ops = conn_req->ep_ops;
+
+	status = ath10k_hif_map_service_to_pipe(htc->ar,
+						ep->service_id,
+						&ep->ul_pipe_id,
+						&ep->dl_pipe_id,
+						&ep->ul_is_polled,
+						&ep->dl_is_polled);
+	if (status)
+		return status;
+
+	ath10k_dbg(ATH10K_DBG_HTC,
+		   "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
+		   htc_service_name(ep->service_id), ep->ul_pipe_id,
+		   ep->dl_pipe_id, ep->eid);
+
+	ath10k_dbg(ATH10K_DBG_HTC,
+		   "EP %d UL polled: %d, DL polled: %d\n",
+		   ep->eid, ep->ul_is_polled, ep->dl_is_polled);
+
+	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+		ep->tx_credit_flow_enabled = false;
+		ath10k_dbg(ATH10K_DBG_HTC,
+			   "HTC service: %s eid: %d TX flow control disabled\n",
+			   htc_service_name(ep->service_id), assigned_eid);
+	}
+
+	return status;
+}
+
+struct sk_buff *ath10k_htc_alloc_skb(int size)
+{
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
+	if (!skb) {
+		ath10k_warn("could not allocate HTC tx skb\n");
+		return NULL;
+	}
+
+	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
+
+	/* FW/HTC requires 4-byte aligned streams */
+	if (!IS_ALIGNED((unsigned long)skb->data, 4))
+		ath10k_warn("Unaligned HTC tx skb\n");
+
+	return skb;
+}
+
+int ath10k_htc_start(struct ath10k_htc *htc)
+{
+	struct sk_buff *skb;
+	int status = 0;
+	struct ath10k_htc_msg *msg;
+
+	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
+	memset(skb->data, 0, skb->len);
+
+	msg = (struct ath10k_htc_msg *)skb->data;
+	msg->hdr.message_id =
+		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+	ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+	if (status) {
+		kfree_skb(skb);
+		return status;
+	}
+
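+	/*
+	 * From this point on the target treats HTC setup as complete and
+	 * may start delivering credit reports and rx frames on the
+	 * connected endpoints.
+	 */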
+	return 0;
+}
+
+/*
+ * stop HTC communications, i.e. stop interrupt reception, and flush all
+ * queued buffers
+ */
+void ath10k_htc_stop(struct ath10k_htc *htc)
+{
+	int i;
+	struct ath10k_htc_ep *ep;
+
+	spin_lock_bh(&htc->tx_lock);
+	htc->stopping = true;
+	spin_unlock_bh(&htc->tx_lock);
+
+	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+		ep = &htc->endpoint[i];
+		ath10k_htc_flush_endpoint_tx(htc, ep);
+	}
+
+	ath10k_hif_stop(htc->ar);
+	ath10k_htc_reset_endpoint_states(htc);
+}
+
+/* creates the HTC layer and registers its rx/tx completion handlers
+ * with the HIF layer */
+struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
+				     struct ath10k_htc_ops *htc_ops)
+{
+	struct ath10k_hif_cb htc_callbacks;
+	struct ath10k_htc_ep *ep = NULL;
+	struct ath10k_htc *htc = NULL;
+
+	/* FIXME: use struct ath10k instead */
+	htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
+	if (!htc)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&htc->tx_lock);
+
+	memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));
+
+	ath10k_htc_reset_endpoint_states(htc);
+
+	/* setup HIF layer callbacks */
+	htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
+	htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
+	htc->ar = ar;
+
+	/* Get HIF default pipe for HTC message exchange */
+	ep = &htc->endpoint[ATH10K_HTC_EP_0];
+
+	ath10k_hif_init(ar, &htc_callbacks);
+	ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
+
+	init_completion(&htc->ctl_resp);
+
+	return htc;
+}
+
+void ath10k_htc_destroy(struct ath10k_htc *htc)
+{
+	kfree(htc);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
new file mode 100644
index 000000000000..fa45844b59fb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTC_H_
+#define _HTC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+
+struct ath10k;
+
+/****************/
+/* HTC protocol */
+/****************/
+
+/*
+ * HTC - host-target control protocol
+ *
+ * tx packets are generally <htc_hdr><payload>
+ * rx packets are more complex: <htc_hdr><payload><trailer>
+ *
+ * The length of payload + trailer is stored in len.
+ * To get the payload-only length, subtract trailer_len from len.
+ *
+ * The trailer contains (possibly) multiple <htc_record> entries.
+ * Each record is an id-len-value triplet.
+ *
+ * HTC header flags, control_byte0 and control_byte1 have
+ * different meanings depending on whether the packet is
+ * tx or rx.
+ *
+ * Alignment: htc_hdr, payload and trailer are
+ * 4-byte aligned.
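+ *
+ * A worked example with illustrative numbers (not taken from the spec):
+ * an rx packet with len = 104 and trailer_len = 8 carries 96 bytes of
+ * payload followed by a single 8-byte trailer record (a 4-byte record
+ * header plus a 4-byte credit report).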
+ */ + +enum ath10k_htc_tx_flags { + ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01, + ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02 +}; + +enum ath10k_htc_rx_flags { + ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02, + ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0 +}; + +struct ath10k_htc_hdr { + u8 eid; /* @enum ath10k_htc_ep_id */ + u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */ + __le16 len; + union { + u8 trailer_len; /* for rx */ + u8 control_byte0; + } __packed; + union { + u8 seq_no; /* for tx */ + u8 control_byte1; + } __packed; + u8 pad0; + u8 pad1; +} __packed __aligned(4); + +enum ath10k_ath10k_htc_msg_id { + ATH10K_HTC_MSG_READY_ID = 1, + ATH10K_HTC_MSG_CONNECT_SERVICE_ID = 2, + ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3, + ATH10K_HTC_MSG_SETUP_COMPLETE_ID = 4, + ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5, + ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6 +}; + +enum ath10k_htc_version { + ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */ + ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */ +}; + +enum ath10k_htc_conn_flags { + ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0, + ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1, + ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2, + ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3, +#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3 + ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2, + ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3 +#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00 +#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8 +}; + +enum ath10k_htc_conn_svc_status { + ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0, + ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1, + ATH10K_HTC_CONN_SVC_STATUS_FAILED = 2, + ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3, + ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4 +}; + +struct ath10k_ath10k_htc_msg_hdr { + __le16 message_id; /* @enum htc_message_id */ +} __packed; + +struct ath10k_htc_unknown { + u8 pad0; + u8 pad1; +} __packed; + +struct ath10k_htc_ready { + __le16 credit_count; + __le16 credit_size; + u8 max_endpoints; + u8 pad0; +} __packed; + +struct ath10k_htc_ready_extended { + struct ath10k_htc_ready base; + u8 htc_version; /* @enum ath10k_htc_version */ + u8 max_msgs_per_htc_bundle; + u8 pad0; + u8 pad1; +} __packed; + +struct ath10k_htc_conn_svc { + __le16 service_id; + __le16 flags; /* @enum ath10k_htc_conn_flags */ + u8 pad0; + u8 pad1; +} __packed; + +struct ath10k_htc_conn_svc_response { + __le16 service_id; + u8 status; /* @enum ath10k_htc_conn_svc_status */ + u8 eid; + __le16 max_msg_size; +} __packed; + +struct ath10k_htc_setup_complete_extended { + u8 pad0; + u8 pad1; + __le32 flags; /* @enum htc_setup_complete_flags */ + u8 max_msgs_per_bundled_recv; + u8 pad2; + u8 pad3; + u8 pad4; +} __packed; + +struct ath10k_htc_msg { + struct ath10k_ath10k_htc_msg_hdr hdr; + union { + /* host-to-target */ + struct ath10k_htc_conn_svc connect_service; + struct ath10k_htc_ready ready; + struct ath10k_htc_ready_extended ready_ext; + struct ath10k_htc_unknown unknown; + struct ath10k_htc_setup_complete_extended setup_complete_ext; + + /* target-to-host */ + struct ath10k_htc_conn_svc_response connect_service_response; + }; +} __packed __aligned(4); + +enum ath10k_ath10k_htc_record_id { + ATH10K_HTC_RECORD_NULL = 0, + ATH10K_HTC_RECORD_CREDITS = 1 +}; + +struct ath10k_ath10k_htc_record_hdr { + u8 id; /* @enum ath10k_ath10k_htc_record_id */ + u8 len; + u8 pad0; + u8 pad1; +} __packed; + +struct ath10k_htc_credit_report { + u8 eid; /* @enum ath10k_htc_ep_id */ + u8 credits; + u8 pad0; + u8 pad1; +} __packed; + +struct 
ath10k_htc_record {
+	struct ath10k_ath10k_htc_record_hdr hdr;
+	union {
+		struct ath10k_htc_credit_report credit_report[0];
+		u8 payload[0];
+	};
+} __packed __aligned(4);
+
+/*
+ * Note: the trailer offset is dynamic, depending on the payload
+ * length. This is only a struct layout draft.
+ */
+struct ath10k_htc_frame {
+	struct ath10k_htc_hdr hdr;
+	union {
+		struct ath10k_htc_msg msg;
+		u8 payload[0];
+	};
+	struct ath10k_htc_record trailer[0];
+} __packed __aligned(4);
+
+
+/*******************/
+/* Host-side stuff */
+/*******************/
+
+enum ath10k_htc_svc_gid {
+	ATH10K_HTC_SVC_GRP_RSVD = 0,
+	ATH10K_HTC_SVC_GRP_WMI = 1,
+	ATH10K_HTC_SVC_GRP_NMI = 2,
+	ATH10K_HTC_SVC_GRP_HTT = 3,
+
+	ATH10K_HTC_SVC_GRP_TEST = 254,
+	ATH10K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+	(int)(((int)(group) << 8) | (int)(idx))
+
+enum ath10k_htc_svc_id {
+	/* NOTE: service ID of 0x0000 is reserved and should never be used */
+	ATH10K_HTC_SVC_ID_RESERVED = 0x0000,
+	ATH10K_HTC_SVC_ID_UNUSED = ATH10K_HTC_SVC_ID_RESERVED,
+
+	ATH10K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
+	ATH10K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
+	ATH10K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
+	ATH10K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
+	ATH10K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
+	ATH10K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
+
+	ATH10K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
+	ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
+
+	ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
+
+	/* raw stream service (i.e. flash, tcmd, calibration apps) */
+	ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
+};
+
+#undef SVC
+
+enum ath10k_htc_ep_id {
+	ATH10K_HTC_EP_UNUSED = -1,
+	ATH10K_HTC_EP_0 = 0,
+	ATH10K_HTC_EP_1 = 1,
+	ATH10K_HTC_EP_2,
+	ATH10K_HTC_EP_3,
+	ATH10K_HTC_EP_4,
+	ATH10K_HTC_EP_5,
+	ATH10K_HTC_EP_6,
+	ATH10K_HTC_EP_7,
+	ATH10K_HTC_EP_8,
+	ATH10K_HTC_EP_COUNT,
+};
+
+struct ath10k_htc_ops {
+	void (*target_send_suspend_complete)(struct ath10k *ar);
+};
+
+struct ath10k_htc_ep_ops {
+	void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
+	void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+};
+
+/* service connection information */
+struct ath10k_htc_svc_conn_req {
+	u16 service_id;
+	struct ath10k_htc_ep_ops ep_ops;
+	int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath10k_htc_svc_conn_resp {
+	u8 buffer_len;
+	u8 actual_len;
+	enum ath10k_htc_ep_id eid;
+	unsigned int max_msg_len;
+	u8 connect_resp_code;
+};
+
+#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH10K_HTC_MAX_LEN 4096
+#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
+					sizeof(struct ath10k_htc_hdr))
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+
+struct ath10k_htc_ep {
+	struct ath10k_htc *htc;
+	enum ath10k_htc_ep_id eid;
+	enum ath10k_htc_svc_id service_id;
+	struct ath10k_htc_ep_ops ep_ops;
+
+	int max_tx_queue_depth;
+	int max_ep_message_len;
+	u8 ul_pipe_id;
+	u8 dl_pipe_id;
+	int ul_is_polled; /* call HIF to get tx completions */
+	int dl_is_polled; /* call HIF to fetch rx (not implemented) */
+
+	struct sk_buff_head tx_queue;
+
+	u8 seq_no; /* for debugging */
+	int tx_credits;
+	int tx_credit_size;
+	int tx_credits_per_max_message;
+	bool tx_credit_flow_enabled;
+
+	struct work_struct send_work;
+};
+
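+/*
+ * A minimal illustrative sketch, not part of the driver proper (the
+ * helper name below is made up): an HTC message always consumes a whole
+ * number of credits, so the divide-then-increment sequence used in
+ * ath10k_htc_connect_service() to fill tx_credits_per_max_message is
+ * equivalent to DIV_ROUND_UP() from <linux/kernel.h>.
+ */
+static inline int ath10k_htc_credits_per_msg_sketch(int max_msg_len,
+						    int credit_size)
+{
+	/* e.g. max_msg_len = 2048, credit_size = 1792 -> 2 credits */
+	return DIV_ROUND_UP(max_msg_len, credit_size);
+}
+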
+struct ath10k_htc_svc_tx_credits { + u16 service_id; + u8 credit_allocation; +}; + +struct ath10k_htc { + struct ath10k *ar; + struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT]; + + /* protects endpoint and stopping fields */ + spinlock_t tx_lock; + + struct ath10k_htc_ops htc_ops; + + u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN]; + int control_resp_len; + + struct completion ctl_resp; + + int total_transmit_credits; + struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT]; + int target_credit_size; + + bool stopping; +}; + +struct ath10k_htc *ath10k_htc_create(struct ath10k *ar, + struct ath10k_htc_ops *htc_ops); +int ath10k_htc_wait_target(struct ath10k_htc *htc); +int ath10k_htc_start(struct ath10k_htc *htc); +int ath10k_htc_connect_service(struct ath10k_htc *htc, + struct ath10k_htc_svc_conn_req *conn_req, + struct ath10k_htc_svc_conn_resp *conn_resp); +int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, + struct sk_buff *packet); +void ath10k_htc_stop(struct ath10k_htc *htc); +void ath10k_htc_destroy(struct ath10k_htc *htc); +struct sk_buff *ath10k_htc_alloc_skb(int size); + +#endif diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c new file mode 100644 index 000000000000..185a5468a2f2 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/slab.h> + +#include "htt.h" +#include "core.h" +#include "debug.h" + +static int ath10k_htt_htc_attach(struct ath10k_htt *htt) +{ + struct ath10k_htc_svc_conn_req conn_req; + struct ath10k_htc_svc_conn_resp conn_resp; + int status; + + memset(&conn_req, 0, sizeof(conn_req)); + memset(&conn_resp, 0, sizeof(conn_resp)); + + conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete; + conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler; + + /* connect to control service */ + conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG; + + status = ath10k_htc_connect_service(htt->ar->htc, &conn_req, + &conn_resp); + + if (status) + return status; + + htt->eid = conn_resp.eid; + + return 0; +} + +struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar) +{ + struct ath10k_htt *htt; + int ret; + + htt = kzalloc(sizeof(*htt), GFP_KERNEL); + if (!htt) + return NULL; + + htt->ar = ar; + htt->max_throughput_mbps = 800; + + /* + * Connect to HTC service. + * This has to be done before calling ath10k_htt_rx_attach, + * since ath10k_htt_rx_attach involves sending a rx ring configure + * message to the target. 
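+ * (That rx ring configure message is itself sent over the HTC endpoint
+ * obtained here, so htt->eid must be valid first.)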
+ */
+	if (ath10k_htt_htc_attach(htt))
+		goto err_htc_attach;
+
+	ret = ath10k_htt_tx_attach(htt);
+	if (ret) {
+		ath10k_err("could not attach htt tx (%d)\n", ret);
+		goto err_htc_attach;
+	}
+
+	if (ath10k_htt_rx_attach(htt))
+		goto err_rx_attach;
+
+	/*
+	 * Prefetch enough data to satisfy target
+	 * classification engine.
+	 * This is for LL chips. HL chips will probably
+	 * transfer all frames in the tx fragment.
+	 */
+	htt->prefetch_len =
+		36 + /* 802.11 + qos + ht */
+		4 + /* 802.1q */
+		8 + /* llc snap */
+		2; /* ip4 dscp or ip6 priority */
+
+	return htt;
+
+err_rx_attach:
+	ath10k_htt_tx_detach(htt);
+err_htc_attach:
+	kfree(htt);
+	return NULL;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+
+static int ath10k_htt_verify_version(struct ath10k_htt *htt)
+{
+	ath10k_dbg(ATH10K_DBG_HTT,
+		   "htt target version %d.%d; host version %d.%d\n",
+		   htt->target_version_major,
+		   htt->target_version_minor,
+		   HTT_CURRENT_VERSION_MAJOR,
+		   HTT_CURRENT_VERSION_MINOR);
+
+	if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
+		ath10k_err("htt major versions are incompatible!\n");
+		return -ENOTSUPP;
+	}
+
+	if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
+		ath10k_warn("htt minor versions differ but are still compatible\n");
+
+	return 0;
+}
+
+int ath10k_htt_attach_target(struct ath10k_htt *htt)
+{
+	int status;
+
+	init_completion(&htt->target_version_received);
+
+	status = ath10k_htt_h2t_ver_req_msg(htt);
+	if (status)
+		return status;
+
+	status = wait_for_completion_timeout(&htt->target_version_received,
+					     HTT_TARGET_VERSION_TIMEOUT_HZ);
+	if (status <= 0) {
+		ath10k_warn("htt version request timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	status = ath10k_htt_verify_version(htt);
+	if (status)
+		return status;
+
+	return ath10k_htt_send_rx_ring_cfg_ll(htt);
+}
+
+void ath10k_htt_detach(struct ath10k_htt *htt)
+{
+	ath10k_htt_rx_detach(htt);
+	ath10k_htt_tx_detach(htt);
+	kfree(htt);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
new file mode 100644
index 000000000000..a7a7aa040536
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -0,0 +1,1338 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTT_H_
+#define _HTT_H_
+
+#include <linux/bug.h>
+
+#include "core.h"
+#include "htc.h"
+#include "rx_desc.h"
+
+#define HTT_CURRENT_VERSION_MAJOR 2
+#define HTT_CURRENT_VERSION_MINOR 1
+
+enum htt_dbg_stats_type {
+	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
+	HTT_DBG_STATS_RX_REORDER = 1 << 1,
+	HTT_DBG_STATS_RX_RATE_INFO = 1 << 2,
+	HTT_DBG_STATS_TX_PPDU_LOG = 1 << 3,
+	HTT_DBG_STATS_TX_RATE_INFO = 1 << 4,
+	/* bits 5-23 currently reserved */
+
+	HTT_DBG_NUM_STATS /* keep this last */
+};
+
+enum htt_h2t_msg_type { /* host-to-target */
+	HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+	HTT_H2T_MSG_TYPE_TX_FRM = 1,
+	HTT_H2T_MSG_TYPE_RX_RING_CFG = 2,
+	HTT_H2T_MSG_TYPE_STATS_REQ = 3,
+	HTT_H2T_MSG_TYPE_SYNC = 4,
+	HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
+	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+	HTT_H2T_MSG_TYPE_MGMT_TX = 7,
+
+	HTT_H2T_NUM_MSGS /* keep this last */
+};
+
+struct htt_cmd_hdr {
+	u8 msg_type;
+} __packed;
+
+struct htt_ver_req {
+	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+} __packed;
+
+/*
+ * HTT tx MSDU descriptor
+ *
+ * The HTT tx MSDU descriptor is created by the host HTT SW for each
+ * tx MSDU. The HTT tx MSDU descriptor contains the information that
+ * the target firmware needs for the FW's tx processing, particularly
+ * for creating the HW msdu descriptor.
+ * The same HTT tx descriptor is used for HL and LL systems, though
+ * a few fields within the tx descriptor are used only by LL or
+ * only by HL.
+ * The HTT tx descriptor is defined in two manners: by a struct with
+ * bitfields, and by a series of [dword offset, bit mask, bit shift]
+ * definitions.
+ * The target should use the struct def, for simplicity and clarity,
+ * but the host shall use the bit-mask + bit-shift defs, to be endian-
+ * neutral. Specifically, the host shall use the get/set macros built
+ * around the mask + shift defs.
+ */
+struct htt_data_tx_desc_frag {
+	__le32 paddr;
+	__le32 len;
+} __packed;
+
+enum htt_data_tx_desc_flags0 {
+	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
+	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
+	HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2,
+	HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3,
+	HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
+};
+
+enum htt_data_tx_desc_flags1 {
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6
+	HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11,
+	HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
+	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
+	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
+	HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15
+};
+
+enum htt_data_tx_ext_tid {
+	HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
+	HTT_DATA_TX_EXT_TID_MGMT = 17,
+	HTT_DATA_TX_EXT_TID_INVALID = 31
+};
+
+#define HTT_INVALID_PEERID 0xFFFF
+
+/*
+ * htt_data_tx_desc - used for data tx path
+ *
+ * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
+ * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
+ * for special kinds of tids
+ * postponed: only for HL hosts. indicates if this is a resend
+ * (HL hosts manage queues on the host)
+ * more_in_batch: only for HL hosts. indicates if more packets are
+ * pending.
this allows target to wait and aggregate + */ +struct htt_data_tx_desc { + u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */ + __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */ + __le16 len; + __le16 id; + __le32 frags_paddr; + __le32 peerid; + u8 prefetch[0]; /* start of frame, for FW classification engine */ +} __packed; + +enum htt_rx_ring_flags { + HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0, + HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1, + HTT_RX_RING_FLAGS_PPDU_START = 1 << 2, + HTT_RX_RING_FLAGS_PPDU_END = 1 << 3, + HTT_RX_RING_FLAGS_MPDU_START = 1 << 4, + HTT_RX_RING_FLAGS_MPDU_END = 1 << 5, + HTT_RX_RING_FLAGS_MSDU_START = 1 << 6, + HTT_RX_RING_FLAGS_MSDU_END = 1 << 7, + HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8, + HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9, + HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10, + HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11, + HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12, + HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13, + HTT_RX_RING_FLAGS_NULL_RX = 1 << 14, + HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15 +}; + +struct htt_rx_ring_setup_ring { + __le32 fw_idx_shadow_reg_paddr; + __le32 rx_ring_base_paddr; + __le16 rx_ring_len; /* in 4-byte words */ + __le16 rx_ring_bufsize; /* rx skb size - in bytes */ + __le16 flags; /* %HTT_RX_RING_FLAGS_ */ + __le16 fw_idx_init_val; + + /* the following offsets are in 4-byte units */ + __le16 mac80211_hdr_offset; + __le16 msdu_payload_offset; + __le16 ppdu_start_offset; + __le16 ppdu_end_offset; + __le16 mpdu_start_offset; + __le16 mpdu_end_offset; + __le16 msdu_start_offset; + __le16 msdu_end_offset; + __le16 rx_attention_offset; + __le16 frag_info_offset; +} __packed; + +struct htt_rx_ring_setup_hdr { + u8 num_rings; /* supported values: 1, 2 */ + __le16 rsvd0; +} __packed; + +struct htt_rx_ring_setup { + struct htt_rx_ring_setup_hdr hdr; + struct htt_rx_ring_setup_ring rings[0]; +} __packed; + +/* + * htt_stats_req - request target to send specified statistics + * + * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ + * @upload_types: see %htt_dbg_stats_type. this is 24bit field actually + * so make sure its little-endian. + * @reset_types: see %htt_dbg_stats_type. this is 24bit field actually + * so make sure its little-endian. + * @cfg_val: stat_type specific configuration + * @stat_type: see %htt_dbg_stats_type + * @cookie_lsb: used for confirmation message from target->host + * @cookie_msb: ditto as %cookie + */ +struct htt_stats_req { + u8 upload_types[3]; + u8 rsvd0; + u8 reset_types[3]; + struct { + u8 mpdu_bytes; + u8 mpdu_num_msdus; + u8 msdu_bytes; + } __packed; + u8 stat_type; + __le32 cookie_lsb; + __le32 cookie_msb; +} __packed; + +#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff + +/* + * htt_oob_sync_req - request out-of-band sync + * + * The HTT SYNC tells the target to suspend processing of subsequent + * HTT host-to-target messages until some other target agent locally + * informs the target HTT FW that the current sync counter is equal to + * or greater than (in a modulo sense) the sync counter specified in + * the SYNC message. + * + * This allows other host-target components to synchronize their operation + * with HTT, e.g. to ensure that tx frames don't get transmitted until a + * security key has been downloaded to and activated by the target. + * In the absence of any explicit synchronization counter value + * specification, the target HTT FW will use zero as the default current + * sync value. + * + * The HTT target FW will suspend its host->target message processing as long + * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128. 
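+ *
+ * A worked example with made-up values: if the out-of-band counter is
+ * at 250 and a SYNC message carries sync_count = 2, then
+ * (2 - 250) & 0xff = 8, which lies in the (0, 128) window, so the
+ * target keeps host->target processing suspended until the other agent
+ * advances the out-of-band counter (modulo 256).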
+ */ +struct htt_oob_sync_req { + u8 sync_count; + __le16 rsvd0; +} __packed; + +#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_MASK 0x1F +#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_LSB 0 + +struct htt_aggr_conf { + u8 max_num_ampdu_subframes; + union { + /* dont use bitfields; undefined behaviour */ + u8 flags; /* see %HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_ */ + u8 max_num_amsdu_subframes:5; + } __packed; +} __packed; + +#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32 + +struct htt_mgmt_tx_desc { + u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)]; + __le32 msdu_paddr; + __le32 desc_id; + __le32 len; + __le32 vdev_id; + u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN]; +} __packed; + +enum htt_mgmt_tx_status { + HTT_MGMT_TX_STATUS_OK = 0, + HTT_MGMT_TX_STATUS_RETRY = 1, + HTT_MGMT_TX_STATUS_DROP = 2 +}; + +/*=== target -> host messages ===============================================*/ + + +enum htt_t2h_msg_type { + HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0, + HTT_T2H_MSG_TYPE_RX_IND = 0x1, + HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2, + HTT_T2H_MSG_TYPE_PEER_MAP = 0x3, + HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4, + HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5, + HTT_T2H_MSG_TYPE_RX_DELBA = 0x6, + HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, + HTT_T2H_MSG_TYPE_PKTLOG = 0x8, + HTT_T2H_MSG_TYPE_STATS_CONF = 0x9, + HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, + HTT_T2H_MSG_TYPE_SEC_IND = 0xb, + HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, + HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, + HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe, + HTT_T2H_MSG_TYPE_TEST, + /* keep this last */ + HTT_T2H_NUM_MSGS +}; + +/* + * htt_resp_hdr - header for target-to-host messages + * + * msg_type: see htt_t2h_msg_type + */ +struct htt_resp_hdr { + u8 msg_type; +} __packed; + +#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0 +#define HTT_RESP_HDR_MSG_TYPE_MASK 0xff +#define HTT_RESP_HDR_MSG_TYPE_LSB 0 + +/* htt_ver_resp - response sent for htt_ver_req */ +struct htt_ver_resp { + u8 minor; + u8 major; + u8 rsvd0; +} __packed; + +struct htt_mgmt_tx_completion { + u8 rsvd0; + u8 rsvd1; + u8 rsvd2; + __le32 desc_id; + __le32 status; +} __packed; + +#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F) +#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0) +#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6) +#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7) + +#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F +#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0 +#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK 0x00000FC0 +#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB 6 +#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000 +#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB 12 +#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK 0x00FC0000 +#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB 18 +#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000 +#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24 + +struct htt_rx_indication_hdr { + u8 info0; /* %HTT_RX_INDICATION_INFO0_ */ + __le16 peer_id; + __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */ +} __packed; + +#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID (1 << 0) +#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E) +#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB (1) +#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK (1 << 5) +#define HTT_RX_INDICATION_INFO0_END_VALID (1 << 6) +#define HTT_RX_INDICATION_INFO0_START_VALID (1 << 7) + +#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK 0x00FFFFFF +#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB 0 +#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 
 0xFF000000
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB 24
+
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A2_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A2_LSB 0
+#define HTT_RX_INDICATION_INFO2_SERVICE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO2_SERVICE_LSB 24
+
+enum htt_rx_legacy_rate {
+	HTT_RX_OFDM_48 = 0,
+	HTT_RX_OFDM_24 = 1,
+	HTT_RX_OFDM_12,
+	HTT_RX_OFDM_6,
+	HTT_RX_OFDM_54,
+	HTT_RX_OFDM_36,
+	HTT_RX_OFDM_18,
+	HTT_RX_OFDM_9,
+
+	/* long preamble */
+	HTT_RX_CCK_11_LP = 0,
+	HTT_RX_CCK_5_5_LP = 1,
+	HTT_RX_CCK_2_LP,
+	HTT_RX_CCK_1_LP,
+	/* short preamble */
+	HTT_RX_CCK_11_SP,
+	HTT_RX_CCK_5_5_SP,
+	HTT_RX_CCK_2_SP
+};
+
+enum htt_rx_legacy_rate_type {
+	HTT_RX_LEGACY_RATE_OFDM = 0,
+	HTT_RX_LEGACY_RATE_CCK
+};
+
+enum htt_rx_preamble_type {
+	HTT_RX_LEGACY = 0x4,
+	HTT_RX_HT = 0x8,
+	HTT_RX_HT_WITH_TXBF = 0x9,
+	HTT_RX_VHT = 0xC,
+	HTT_RX_VHT_WITH_TXBF = 0xD,
+};
+
+/*
+ * Fields: phy_err_valid, phy_err_code, tsf,
+ * usec_timestamp, sub_usec_timestamp
+ * ..are valid only if end_valid == 1.
+ *
+ * Fields: rssi_chains, legacy_rate_type,
+ * legacy_rate_cck, preamble_type, service,
+ * vht_sig_*
+ * ..are valid only if start_valid == 1;
+ */
+struct htt_rx_indication_ppdu {
+	u8 combined_rssi;
+	u8 sub_usec_timestamp;
+	u8 phy_err_code;
+	u8 info0; /* HTT_RX_INDICATION_INFO0_ */
+	struct {
+		u8 pri20_db;
+		u8 ext20_db;
+		u8 ext40_db;
+		u8 ext80_db;
+	} __packed rssi_chains[4];
+	__le32 tsf;
+	__le32 usec_timestamp;
+	__le32 info1; /* HTT_RX_INDICATION_INFO1_ */
+	__le32 info2; /* HTT_RX_INDICATION_INFO2_ */
+} __packed;
+
+enum htt_rx_mpdu_status {
+	HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
+	HTT_RX_IND_MPDU_STATUS_OK,
+	HTT_RX_IND_MPDU_STATUS_ERR_FCS,
+	HTT_RX_IND_MPDU_STATUS_ERR_DUP,
+	HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
+	HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
+	/* only accept EAPOL frames */
+	HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
+	HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
+	/* Non-data in promiscuous mode */
+	HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
+	HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
+	HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
+	HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
+	HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
+	HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
+
+	/*
+	 * MISC: discard for unspecified reasons.
+	 * Leave this enum value last.
+	 */
+	HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
+};
+
+struct htt_rx_indication_mpdu_range {
+	u8 mpdu_count;
+	u8 mpdu_range_status; /* %htt_rx_mpdu_status */
+	u8 pad0;
+	u8 pad1;
+} __packed;
+
+struct htt_rx_indication_prefix {
+	__le16 fw_rx_desc_bytes;
+	u8 pad0;
+	u8 pad1;
+};
+
+struct htt_rx_indication {
+	struct htt_rx_indication_hdr hdr;
+	struct htt_rx_indication_ppdu ppdu;
+	struct htt_rx_indication_prefix prefix;
+
+	/*
+	 * the following fields are both dynamically sized, so
+	 * take care addressing them
+	 */
+
+	/* the size of this is %fw_rx_desc_bytes */
+	struct fw_rx_desc_base fw_desc;
+
+	/*
+	 * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
+	 * and has %num_mpdu_ranges elements.
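+	 *
+	 * For example, with fw_rx_desc_bytes = 10 (an illustrative value)
+	 * the fw_desc area is padded out to 12 bytes, so the first element
+	 * of %mpdu_ranges begins 12 bytes past the end of %prefix; see
+	 * htt_rx_ind_get_mpdu_ranges() below.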
+ */ + struct htt_rx_indication_mpdu_range mpdu_ranges[0]; +} __packed; + +static inline struct htt_rx_indication_mpdu_range * + htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind) +{ + void *ptr = rx_ind; + + ptr += sizeof(rx_ind->hdr) + + sizeof(rx_ind->ppdu) + + sizeof(rx_ind->prefix) + + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4); + return ptr; +} + +enum htt_rx_flush_mpdu_status { + HTT_RX_FLUSH_MPDU_DISCARD = 0, + HTT_RX_FLUSH_MPDU_REORDER = 1, +}; + +/* + * htt_rx_flush - discard or reorder given range of mpdus + * + * Note: host must check if all sequence numbers between + * [seq_num_start, seq_num_end-1] are valid. + */ +struct htt_rx_flush { + __le16 peer_id; + u8 tid; + u8 rsvd0; + u8 mpdu_status; /* %htt_rx_flush_mpdu_status */ + u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */ + u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */ +}; + +struct htt_rx_peer_map { + u8 vdev_id; + __le16 peer_id; + u8 addr[6]; + u8 rsvd0; + u8 rsvd1; +} __packed; + +struct htt_rx_peer_unmap { + u8 rsvd0; + __le16 peer_id; +} __packed; + +enum htt_security_types { + HTT_SECURITY_NONE, + HTT_SECURITY_WEP128, + HTT_SECURITY_WEP104, + HTT_SECURITY_WEP40, + HTT_SECURITY_TKIP, + HTT_SECURITY_TKIP_NOMIC, + HTT_SECURITY_AES_CCMP, + HTT_SECURITY_WAPI, + + HTT_NUM_SECURITY_TYPES /* keep this last! */ +}; + +enum htt_security_flags { +#define HTT_SECURITY_TYPE_MASK 0x7F +#define HTT_SECURITY_TYPE_LSB 0 + HTT_SECURITY_IS_UNICAST = 1 << 7 +}; + +struct htt_security_indication { + union { + /* dont use bitfields; undefined behaviour */ + u8 flags; /* %htt_security_flags */ + struct { + u8 security_type:7, /* %htt_security_types */ + is_unicast:1; + } __packed; + } __packed; + __le16 peer_id; + u8 michael_key[8]; + u8 wapi_rsc[16]; +} __packed; + +#define HTT_RX_BA_INFO0_TID_MASK 0x000F +#define HTT_RX_BA_INFO0_TID_LSB 0 +#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0 +#define HTT_RX_BA_INFO0_PEER_ID_LSB 4 + +struct htt_rx_addba { + u8 window_size; + __le16 info0; /* %HTT_RX_BA_INFO0_ */ +} __packed; + +struct htt_rx_delba { + u8 rsvd0; + __le16 info0; /* %HTT_RX_BA_INFO0_ */ +} __packed; + +enum htt_data_tx_status { + HTT_DATA_TX_STATUS_OK = 0, + HTT_DATA_TX_STATUS_DISCARD = 1, + HTT_DATA_TX_STATUS_NO_ACK = 2, + HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */ + HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128 +}; + +enum htt_data_tx_flags { +#define HTT_DATA_TX_STATUS_MASK 0x07 +#define HTT_DATA_TX_STATUS_LSB 0 +#define HTT_DATA_TX_TID_MASK 0x78 +#define HTT_DATA_TX_TID_LSB 3 + HTT_DATA_TX_TID_INVALID = 1 << 7 +}; + +#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF + +struct htt_data_tx_completion { + union { + u8 flags; + struct { + u8 status:3, + tid:4, + tid_invalid:1; + } __packed; + } __packed; + u8 num_msdus; + u8 rsvd0; + __le16 msdus[0]; /* variable length based on %num_msdus */ +} __packed; + +struct htt_tx_compl_ind_base { + u32 hdr; + u16 payload[1/*or more*/]; +} __packed; + +struct htt_rc_tx_done_params { + u32 rate_code; + u32 rate_code_flags; + u32 flags; + u32 num_enqued; /* 1 for non-AMPDU */ + u32 num_retries; + u32 num_failed; /* for AMPDU */ + u32 ack_rssi; + u32 time_stamp; + u32 is_probe; +}; + +struct htt_rc_update { + u8 vdev_id; + __le16 peer_id; + u8 addr[6]; + u8 num_elems; + u8 rsvd0; + struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */ +} __packed; + +/* see htt_rx_indication for similar fields and descriptions */ +struct htt_rx_fragment_indication { + union { + u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */ + struct { + u8 ext_tid:5, + flush_valid:1; + } 
__packed;
+	} __packed;
+	__le16 peer_id;
+	__le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
+	__le16 fw_rx_desc_bytes;
+	__le16 rsvd0;
+
+	u8 fw_msdu_rx_desc[0];
+} __packed;
+
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5
+
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
+
+/*
+ * target -> host test message definition
+ *
+ * The following field definitions describe the format of the test
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit integer values, followed by a variable number
+ * of 8-bit character values.
+ *
+ * |31                        16|15           8|7            0|
+ * |----------------------------------------------------------|
+ * |         num chars          |   num ints   |   msg type   |
+ * |----------------------------------------------------------|
+ * |                           int 0                          |
+ * |----------------------------------------------------------|
+ * |                           int 1                          |
+ * |----------------------------------------------------------|
+ * |                            ...                           |
+ * |----------------------------------------------------------|
+ * |    char 3   |    char 2    |    char 1    |    char 0    |
+ * |----------------------------------------------------------|
+ * |             |              |      ...     |    char 4    |
+ * |----------------------------------------------------------|
+ * - MSG_TYPE
+ *   Bits 7:0
+ *   Purpose: identifies this as a test message
+ *   Value: HTT_MSG_TYPE_TEST
+ * - NUM_INTS
+ *   Bits 15:8
+ *   Purpose: indicate how many 32-bit integers follow the message header
+ * - NUM_CHARS
+ *   Bits 31:16
+ *   Purpose: indicate how many 8-bit characters follow the series of integers
+ */
+struct htt_rx_test {
+	u8 num_ints;
+	__le16 num_chars;
+
+	/* payload consists of 2 lists:
+	 * a) num_ints * sizeof(__le32)
+	 * b) num_chars * sizeof(u8) aligned to 4 bytes */
+	u8 payload[0];
+} __packed;
+
+static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
+{
+	return (__le32 *)rx_test->payload;
+}
+
+static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
+{
+	return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
+}
+
+/*
+ * target -> host packet log message
+ *
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit values.
+ *
+ * |31          24|23          16|15           8|7            0|
+ * |-----------------------------------------------------------|
+ * |              |              |              |   msg type   |
+ * |-----------------------------------------------------------|
+ * |                          payload                          |
+ * |-----------------------------------------------------------|
+ * - MSG_TYPE
+ *   Bits 7:0
+ *   Purpose: identifies this as a packet log message
+ *   Value: HTT_MSG_TYPE_PACKETLOG
+ */
+struct htt_pktlog_msg {
+	u8 pad[3];
+	__le32 payload[1 /* or more */];
+} __packed;
+
+struct htt_dbg_stats_rx_reorder_stats {
+	/* Non QoS MPDUs received */
+	__le32 deliver_non_qos;
+
+	/* MPDUs received in-order */
+	__le32 deliver_in_order;
+
+	/* Flush due to reorder timer expired */
+	__le32 deliver_flush_timeout;
+
+	/* Flush due to move out of window */
+	__le32 deliver_flush_oow;
+
+	/* Flush due to DELBA */
+	__le32 deliver_flush_delba;
+
+	/* MPDUs dropped due to FCS error */
+	__le32 fcs_error;
+
+	/* MPDUs dropped due to monitor mode non-data packet */
+	__le32 mgmt_ctrl;
+
+	/* MPDUs dropped due to invalid peer */
+	__le32 invalid_peer;
+
+	/* MPDUs dropped due to duplication (non aggregation) */
+	__le32 dup_non_aggr;
+
+	/* MPDUs dropped because they were processed before */
+	__le32 dup_past;
+
+	/* MPDUs dropped due to duplicate in reorder queue */
+	__le32 dup_in_reorder;
+
+	/* Reorder timeout happened */
+	__le32 reorder_timeout;
+
+	/* invalid bar ssn */
+	__le32 invalid_bar_ssn;
+
+	/* reorder reset due to bar ssn */
+	__le32 ssn_reset;
+};
+
+struct htt_dbg_stats_wal_tx_stats {
+	/* Num HTT cookies queued to dispatch list */
+	__le32 comp_queued;
+
+	/* Num HTT cookies dispatched */
+	__le32 comp_delivered;
+
+	/* Num MSDUs queued to WAL */
+	__le32 msdu_enqued;
+
+	/* Num MPDUs queued to WAL */
+	__le32 mpdu_enqued;
+
+	/* Num MSDUs dropped by WMM limit */
+	__le32 wmm_drop;
+
+	/* Num Local frames queued */
+	__le32 local_enqued;
+
+	/* Num Local frames done */
+	__le32 local_freed;
+
+	/* Num queued to HW */
+	__le32 hw_queued;
+
+	/* Num PPDU reaped from HW */
+	__le32 hw_reaped;
+
+	/* Num underruns */
+	__le32 underrun;
+
+	/* Num PPDUs cleaned up in TX abort */
+	__le32 tx_abort;
+
+	/* Num MPDUs requeued by SW */
+	__le32 mpdus_requed;
+
+	/* excessive retries */
+	__le32 tx_ko;
+
+	/* data hw rate code */
+	__le32 data_rc;
+
+	/* Scheduler self triggers */
+	__le32 self_triggers;
+
+	/* frames dropped due to excessive sw retries */
+	__le32 sw_retry_failure;
+
+	/* illegal rate phy errors */
+	__le32 illgl_rate_phy_err;
+
+	/* wal pdev continuous xretry */
+	__le32 pdev_cont_xretry;
+
+	/* wal pdev tx timeout */
+	__le32 pdev_tx_timeout;
+
+	/* wal pdev resets */
+	__le32 pdev_resets;
+
+	__le32 phy_underrun;
+
+	/* MPDU is more than txop limit */
+	__le32 txop_ovf;
+} __packed;
+
+struct htt_dbg_stats_wal_rx_stats {
+	/* Counts any change in ring routing mid-ppdu */
+	__le32 mid_ppdu_route_change;
+
+	/* Total number of statuses processed */
+	__le32 status_rcvd;
+
+	/* Extra frags on rings 0-3 */
+	__le32 r0_frags;
+	__le32 r1_frags;
+	__le32 r2_frags;
+	__le32 r3_frags;
+
+	/* MSDUs / MPDUs delivered to HTT */
+	__le32 htt_msdus;
+	__le32 htt_mpdus;
+
+	/* MSDUs / MPDUs delivered to local stack */
+	__le32 loc_msdus;
+	__le32 loc_mpdus;
+
+	/* AMSDUs that have more MSDUs than the status ring size */
+	__le32 oversize_amsdu;
+
+	/* Number of PHY errors */
+	__le32 phy_errs;
+
+	/* Number of PHY error drops */
+	__le32 phy_err_drop;
+
+	/* Number of mpdu errors - FCS, MIC, ENC etc.
*/ + __le32 mpdu_errs; +} __packed; + +struct htt_dbg_stats_wal_peer_stats { + __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ +} __packed; + +struct htt_dbg_stats_wal_pdev_txrx { + struct htt_dbg_stats_wal_tx_stats tx_stats; + struct htt_dbg_stats_wal_rx_stats rx_stats; + struct htt_dbg_stats_wal_peer_stats peer_stats; +} __packed; + +struct htt_dbg_stats_rx_rate_info { + __le32 mcs[10]; + __le32 sgi[10]; + __le32 nss[4]; + __le32 stbc[10]; + __le32 bw[3]; + __le32 pream[6]; + __le32 ldpc; + __le32 txbf; +}; + +/* + * htt_dbg_stats_status - + * present - The requested stats have been delivered in full. + * This indicates that either the stats information was contained + * in its entirety within this message, or else this message + * completes the delivery of the requested stats info that was + * partially delivered through earlier STATS_CONF messages. + * partial - The requested stats have been delivered in part. + * One or more subsequent STATS_CONF messages with the same + * cookie value will be sent to deliver the remainder of the + * information. + * error - The requested stats could not be delivered, for example due + * to a shortage of memory to construct a message holding the + * requested stats. + * invalid - The requested stat type is either not recognized, or the + * target is configured to not gather the stats type in question. + * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + * series_done - This special value indicates that no further stats info + * elements are present within a series of stats info elems + * (within a stats upload confirmation message). + */ +enum htt_dbg_stats_status { + HTT_DBG_STATS_STATUS_PRESENT = 0, + HTT_DBG_STATS_STATUS_PARTIAL = 1, + HTT_DBG_STATS_STATUS_ERROR = 2, + HTT_DBG_STATS_STATUS_INVALID = 3, + HTT_DBG_STATS_STATUS_SERIES_DONE = 7 +}; + +/* + * target -> host statistics upload + * + * The following field definitions describe the format of the HTT target + * to host stats upload confirmation message. + * The message contains a cookie echoed from the HTT host->target stats + * upload request, which identifies which request the confirmation is + * for, and a series of tag-length-value stats information elements. + * The tag-length header for each stats info element also includes a + * status field, to indicate whether the request for the stat type in + * question was fully met, partially met, unable to be met, or invalid + * (if the stat type in question is disabled in the target). + * A special value of all 1's in this status field is used to indicate + * the end of the series of stats info elements. 
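+ *
+ * For example, a target that can satisfy the whole request in a single
+ * message tags every stats element PRESENT (0) and terminates the list
+ * with an element whose status field reads SERIES_DONE (7).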
+ * + * + * |31 16|15 8|7 5|4 0| + * |------------------------------------------------------------| + * | reserved | msg type | + * |------------------------------------------------------------| + * | cookie LSBs | + * |------------------------------------------------------------| + * | cookie MSBs | + * |------------------------------------------------------------| + * | stats entry length | reserved | S |stat type| + * |------------------------------------------------------------| + * | | + * | type-specific stats info | + * | | + * |------------------------------------------------------------| + * | stats entry length | reserved | S |stat type| + * |------------------------------------------------------------| + * | | + * | type-specific stats info | + * | | + * |------------------------------------------------------------| + * | n/a | reserved | 111 | n/a | + * |------------------------------------------------------------| + * Header fields: + * - MSG_TYPE + * Bits 7:0 + * Purpose: identifies this is a statistics upload confirmation message + * Value: 0x9 + * - COOKIE_LSBS + * Bits 31:0 + * Purpose: Provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. + * Value: LSBs of the opaque cookie specified by the host-side requestor + * - COOKIE_MSBS + * Bits 31:0 + * Purpose: Provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. + * Value: MSBs of the opaque cookie specified by the host-side requestor + * + * Stats Information Element tag-length header fields: + * - STAT_TYPE + * Bits 4:0 + * Purpose: identifies the type of statistics info held in the + * following information element + * Value: htt_dbg_stats_type + * - STATUS + * Bits 7:5 + * Purpose: indicate whether the requested stats are present + * Value: htt_dbg_stats_status, including a special value (0x7) to mark + * the completion of the stats entry series + * - LENGTH + * Bits 31:16 + * Purpose: indicate the stats information size + * Value: This field specifies the number of bytes of stats information + * that follows the element tag-length header. + * It is expected but not required that this length is a multiple of + * 4 bytes. Even if the length is not an integer multiple of 4, the + * subsequent stats entry header will begin on a 4-byte aligned + * boundary. + */ + +#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F +#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0 +#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0 +#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5 + +struct htt_stats_conf_item { + union { + u8 info; + struct { + u8 stat_type:5; /* %HTT_DBG_STATS_ */ + u8 status:3; /* %HTT_DBG_STATS_STATUS_ */ + } __packed; + } __packed; + u8 pad; + __le16 length; + u8 payload[0]; /* roundup(length, 4) long */ +} __packed; + +struct htt_stats_conf { + u8 pad[3]; + __le32 cookie_lsb; + __le32 cookie_msb; + + /* each item has variable length! */ + struct htt_stats_conf_item items[0]; +} __packed; + +static inline struct htt_stats_conf_item *htt_stats_conf_next_item( + const struct htt_stats_conf_item *item) +{ + return (void *)item + sizeof(*item) + roundup(item->length, 4); +} +/* + * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank + * + * The following field definitions describe the format of the HTT host + * to target frag_desc/msdu_ext bank configuration message. 
+ * The message contains the base address and the min and max id of the
+ * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
+ * MSDU_EXT/FRAG_DESC.
+ * HTT will use the id in the HTT descriptor instead of sending the
+ * frag_desc_ptr. For the QCA988X HW the firmware will use the
+ * fragment_desc_ptr, but in WIFI2.0 the hardware does the
+ * mapping/translation.
+ *
+ * Up to 16 banks in total can be configured.
+ *
+ * This should be called before any TX has been initiated by the HTT.
+ *
+ * |31                        16|15           8|7   5|4        0|
+ * |-----------------------------------------------------------|
+ * | DESC_SIZE    |  NUM_BANKS  | RES |SWP|pdev|    msg type    |
+ * |-----------------------------------------------------------|
+ * |                     BANK0_BASE_ADDRESS                     |
+ * |-----------------------------------------------------------|
+ * |                            ...                             |
+ * |-----------------------------------------------------------|
+ * |                    BANK15_BASE_ADDRESS                     |
+ * |-----------------------------------------------------------|
+ * |       BANK0_MAX_ID          |       BANK0_MIN_ID           |
+ * |-----------------------------------------------------------|
+ * |                            ...                             |
+ * |-----------------------------------------------------------|
+ * |       BANK15_MAX_ID         |       BANK15_MIN_ID          |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Value: 0x6
+ *  - BANKx_BASE_ADDRESS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to specify the base address of the
+ *    MSDU_EXT bank physical/bus address.
+ *  - BANKx_MIN_ID
+ *    Bits 15:0
+ *    Purpose: Provide a mechanism to specify the min index that needs
+ *    to be mapped.
+ *  - BANKx_MAX_ID
+ *    Bits 31:16
+ *    Purpose: Provide a mechanism to specify the max index that needs
+ *    to be mapped.
+ */
+struct htt_frag_desc_bank_id {
+	__le16 bank_min_id;
+	__le16 bank_max_id;
+} __packed;
+
+/* real is 16 but it wouldn't fit in the max htt message size
+ * so we use a conservatively safe value for now */
+#define HTT_FRAG_DESC_BANK_MAX 4
+
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2)
+
+struct htt_frag_desc_bank_cfg {
+	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+	u8 num_banks;
+	u8 desc_size;
+	__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+} __packed;
+
+union htt_rx_pn_t {
+	/* WEP: 24-bit PN */
+	u32 pn24;
+
+	/* TKIP or CCMP: 48-bit PN */
+	u64 pn48;
+
+	/* WAPI: 128-bit PN */
+	u64 pn128[2];
+};
+
+struct htt_cmd {
+	struct htt_cmd_hdr hdr;
+	union {
+		struct htt_ver_req ver_req;
+		struct htt_mgmt_tx_desc mgmt_tx;
+		struct htt_data_tx_desc data_tx;
+		struct htt_rx_ring_setup rx_setup;
+		struct htt_stats_req stats_req;
+		struct htt_oob_sync_req oob_sync_req;
+		struct htt_aggr_conf aggr_conf;
+		struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+	};
+} __packed;
+
+struct htt_resp {
+	struct htt_resp_hdr hdr;
+	union {
+		struct htt_ver_resp ver_resp;
+		struct htt_mgmt_tx_completion mgmt_tx_completion;
+		struct htt_data_tx_completion data_tx_completion;
+		struct htt_rx_indication rx_ind;
+		struct htt_rx_fragment_indication rx_frag_ind;
+		struct htt_rx_peer_map peer_map;
+		struct htt_rx_peer_unmap peer_unmap;
+		struct htt_rx_flush rx_flush;
+		struct htt_rx_addba rx_addba;
+		struct htt_rx_delba rx_delba;
+		struct htt_security_indication security_indication;
+		struct htt_rc_update rc_update;
+		struct htt_rx_test rx_test;
+		struct htt_pktlog_msg pktlog_msg;
+		struct htt_stats_conf stats_conf;
+	};
+} __packed;
+
+
+/***
host side structures follow ***/ + +struct htt_tx_done { + u32 msdu_id; + bool discard; + bool no_ack; +}; + +struct htt_peer_map_event { + u8 vdev_id; + u16 peer_id; + u8 addr[ETH_ALEN]; +}; + +struct htt_peer_unmap_event { + u16 peer_id; +}; + +struct htt_rx_info { + struct sk_buff *skb; + enum htt_rx_mpdu_status status; + enum htt_rx_mpdu_encrypt_type encrypt_type; + s8 signal; + struct { + u8 info0; + u32 info1; + u32 info2; + } rate; + bool fcs_err; +}; + +struct ath10k_htt { + struct ath10k *ar; + enum ath10k_htc_ep_id eid; + + int max_throughput_mbps; + u8 target_version_major; + u8 target_version_minor; + struct completion target_version_received; + + struct { + /* + * Ring of network buffer objects - This ring is + * used exclusively by the host SW. This ring + * mirrors the dev_addrs_ring that is shared + * between the host SW and the MAC HW. The host SW + * uses this netbufs ring to locate the network + * buffer objects whose data buffers the HW has + * filled. + */ + struct sk_buff **netbufs_ring; + /* + * Ring of buffer addresses - + * This ring holds the "physical" device address of the + * rx buffers the host SW provides for the MAC HW to + * fill. + */ + __le32 *paddrs_ring; + + /* + * Base address of ring, as a "physical" device address + * rather than a CPU address. + */ + dma_addr_t base_paddr; + + /* how many elems in the ring (power of 2) */ + int size; + + /* size - 1 */ + unsigned size_mask; + + /* how many rx buffers to keep in the ring */ + int fill_level; + + /* how many rx buffers (full+empty) are in the ring */ + int fill_cnt; + + /* + * alloc_idx - where HTT SW has deposited empty buffers + * This is allocated in consistent mem, so that the FW can + * read this variable, and program the HW's FW_IDX reg with + * the value of this shadow register. + */ + struct { + __le32 *vaddr; + dma_addr_t paddr; + } alloc_idx; + + /* where HTT SW has processed bufs filled by rx MAC DMA */ + struct { + unsigned msdu_payld; + } sw_rd_idx; + + /* + * refill_retry_timer - timer triggered when the ring is + * not refilled to the level expected + */ + struct timer_list refill_retry_timer; + + /* Protects access to all rx ring buffer state variables */ + spinlock_t lock; + } rx_ring; + + unsigned int prefetch_len; + + /* Protects access to %pending_tx, %used_msdu_ids */ + spinlock_t tx_lock; + int max_num_pending_tx; + int num_pending_tx; + struct sk_buff **pending_tx; + unsigned long *used_msdu_ids; /* bitmap */ + wait_queue_head_t empty_tx_wq; + + /* set if host-fw communication goes haywire + * used to avoid further failures */ + bool rx_confused; +}; + +#define RX_HTT_HDR_STATUS_LEN 64 + +/* This structure layout is programmed via rx ring setup + * so that FW knows how to transfer the rx descriptor to the host. + * Buffers like this are placed on the rx ring. */ +struct htt_rx_desc { + union { + /* This field is filled on the host using the msdu buffer + * from htt_rx_indication */ + struct fw_rx_desc_base fw_desc; + u32 pad; + } __packed; + struct { + struct rx_attention attention; + struct rx_frag_info frag_info; + struct rx_mpdu_start mpdu_start; + struct rx_msdu_start msdu_start; + struct rx_msdu_end msdu_end; + struct rx_mpdu_end mpdu_end; + struct rx_ppdu_start ppdu_start; + struct rx_ppdu_end ppdu_end; + } __packed; + u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN]; + u8 msdu_payload[0]; +}; + +#define HTT_RX_DESC_ALIGN 8 + +#define HTT_MAC_ADDR_LEN 6 + +/* + * FIX THIS + * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size, + * rounded up to a cache line size. 
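+ * (With the hardcoded value below the per-buffer payload room is
+ * HTT_RX_MSDU_SIZE, i.e. 1920 bytes minus sizeof(struct htt_rx_desc).)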
+ */
+#define HTT_RX_BUF_SIZE 1920
+#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/*
+ * DMA_MAP expects the buffer to be an integral number of cache lines.
+ * Rather than checking the actual cache line size, this code makes a
+ * conservative estimate of what the cache line size could be.
+ */
+#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7	/* 2^7 = 128 */
+#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
+
+struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar);
+int ath10k_htt_attach_target(struct ath10k_htt *htt);
+void ath10k_htt_detach(struct ath10k_htt *htt);
+
+int ath10k_htt_tx_attach(struct ath10k_htt *htt);
+void ath10k_htt_tx_detach(struct ath10k_htt *htt);
+int ath10k_htt_rx_attach(struct ath10k_htt *htt);
+void ath10k_htt_rx_detach(struct ath10k_htt *htt);
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
+
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
new file mode 100644
index 000000000000..de058d7adca8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -0,0 +1,1167 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+#include "htt.h"
+#include "txrx.h"
+#include "debug.h"
+
+#include <linux/log2.h>
+
+/* slightly larger than one large A-MPDU */
+#define HTT_RX_RING_SIZE_MIN 128
+
+/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
+#define HTT_RX_RING_SIZE_MAX 2048
+
+#define HTT_RX_AVG_FRM_BYTES 1000
+
+/* ms, very conservative */
+#define HTT_RX_HOST_LATENCY_MAX_MS 20
+
+/* ms, conservative */
+#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
+
+/* when under memory pressure rx ring refill may fail and needs a retry */
+#define HTT_RX_RING_REFILL_RETRY_MS 50
+
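+/*
+ * Illustrative sizing example (not part of the driver logic): assuming
+ * a hypothetical max_throughput_mbps of 800, ath10k_htt_rx_ring_size()
+ * below computes 800 * 1000 / (8 * 1000) * 20 = 2000 entries, which
+ * roundup_pow_of_two() then turns into 2048, matching the
+ * HTT_RX_RING_SIZE_MAX cap.
+ */
+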
+static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
+{
+	int size;
+
+	/*
+	 * It is expected that the host CPU will typically be able to
+	 * service the rx indication from one A-MPDU before the rx
+	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
+	 * later. However, the rx ring should be sized very conservatively,
+	 * to accommodate the worst reasonable delay before the host CPU
+	 * services a rx indication interrupt.
+	 *
+	 * The rx ring need not be kept full of empty buffers. In theory,
+	 * the htt host SW can dynamically track the low-water mark in the
+	 * rx ring, and dynamically adjust the level to which the rx ring
+	 * is filled with empty buffers, to dynamically meet the desired
+	 * low-water mark.
+	 *
+	 * In contrast, it's difficult to resize the rx ring itself, once
+	 * it's in use. Thus, the ring itself should be sized very
+	 * conservatively, while the degree to which the ring is filled
+	 * with empty buffers should be sized moderately conservatively.
+	 */
+
+	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
+	size =
+	    htt->max_throughput_mbps *
+	    1000 /
+	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
+
+	if (size < HTT_RX_RING_SIZE_MIN)
+		size = HTT_RX_RING_SIZE_MIN;
+
+	if (size > HTT_RX_RING_SIZE_MAX)
+		size = HTT_RX_RING_SIZE_MAX;
+
+	size = roundup_pow_of_two(size);
+
+	return size;
+}
+
+static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
+{
+	int size;
+
+	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
+	size =
+	    htt->max_throughput_mbps *
+	    1000 /
+	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
+
+	/*
+	 * Make sure the fill level is at least 1 less than the ring size.
+	 * Leaving 1 element empty allows the SW to easily distinguish
+	 * between a full ring vs. an empty ring.
+	 */
+	if (size >= htt->rx_ring.size)
+		size = htt->rx_ring.size - 1;
+
+	return size;
+}
+
+static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
+{
+	struct sk_buff *skb;
+	struct ath10k_skb_cb *cb;
+	int i;
+
+	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
+		skb = htt->rx_ring.netbufs_ring[i];
+		cb = ATH10K_SKB_CB(skb);
+		dma_unmap_single(htt->ar->dev, cb->paddr,
+				 skb->len + skb_tailroom(skb),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+	}
+
+	htt->rx_ring.fill_cnt = 0;
+}
+
+static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+	struct htt_rx_desc *rx_desc;
+	struct sk_buff *skb;
+	dma_addr_t paddr;
+	int ret = 0, idx;
+
+	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
+	while (num > 0) {
+		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+		if (!skb) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
+			skb_pull(skb,
+				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
+				 skb->data);
+
+		/* Clear rx_desc attention word before posting to Rx ring */
+		rx_desc = (struct htt_rx_desc *)skb->data;
+		rx_desc->attention.flags = __cpu_to_le32(0);
+
+		paddr = dma_map_single(htt->ar->dev, skb->data,
+				       skb->len + skb_tailroom(skb),
+				       DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
+			dev_kfree_skb_any(skb);
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		ATH10K_SKB_CB(skb)->paddr = paddr;
+		htt->rx_ring.netbufs_ring[idx] = skb;
+		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+		htt->rx_ring.fill_cnt++;
+
+		num--;
+		idx++;
+		idx &= htt->rx_ring.size_mask;
+	}
+
+fail:
+	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
+	return ret;
+}
+
+static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+	lockdep_assert_held(&htt->rx_ring.lock);
+	return __ath10k_htt_rx_ring_fill_n(htt, num);
+}
+
+static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
+{
+	int ret, num_to_fill;
+
+	spin_lock_bh(&htt->rx_ring.lock);
+	num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
+	if (ret == -ENOMEM) {
+		/*
+		 * Failed to fill it to the desired level -
+		 * we'll start a timer and try again next time.
+ * As long as enough buffers are left in the ring for + * another A-MPDU rx, no special recovery is needed. + */ + mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + + msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); + } + spin_unlock_bh(&htt->rx_ring.lock); +} + +static void ath10k_htt_rx_ring_refill_retry(unsigned long arg) +{ + struct ath10k_htt *htt = (struct ath10k_htt *)arg; + ath10k_htt_rx_msdu_buff_replenish(htt); +} + +static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt) +{ + return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) - + htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask; +} + +void ath10k_htt_rx_detach(struct ath10k_htt *htt) +{ + int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld; + + del_timer_sync(&htt->rx_ring.refill_retry_timer); + + while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) { + struct sk_buff *skb = + htt->rx_ring.netbufs_ring[sw_rd_idx]; + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + + dma_unmap_single(htt->ar->dev, cb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]); + sw_rd_idx++; + sw_rd_idx &= htt->rx_ring.size_mask; + } + + dma_free_coherent(htt->ar->dev, + (htt->rx_ring.size * + sizeof(htt->rx_ring.paddrs_ring)), + htt->rx_ring.paddrs_ring, + htt->rx_ring.base_paddr); + + dma_free_coherent(htt->ar->dev, + sizeof(*htt->rx_ring.alloc_idx.vaddr), + htt->rx_ring.alloc_idx.vaddr, + htt->rx_ring.alloc_idx.paddr); + + kfree(htt->rx_ring.netbufs_ring); +} + +static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) +{ + int idx; + struct sk_buff *msdu; + + spin_lock_bh(&htt->rx_ring.lock); + + if (ath10k_htt_rx_ring_elems(htt) == 0) + ath10k_warn("htt rx ring is empty!\n"); + + idx = htt->rx_ring.sw_rd_idx.msdu_payld; + msdu = htt->rx_ring.netbufs_ring[idx]; + + idx++; + idx &= htt->rx_ring.size_mask; + htt->rx_ring.sw_rd_idx.msdu_payld = idx; + htt->rx_ring.fill_cnt--; + + spin_unlock_bh(&htt->rx_ring.lock); + return msdu; +} + +static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb) +{ + struct sk_buff *next; + + while (skb) { + next = skb->next; + dev_kfree_skb_any(skb); + skb = next; + } +} + +static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, + u8 **fw_desc, int *fw_desc_len, + struct sk_buff **head_msdu, + struct sk_buff **tail_msdu) +{ + int msdu_len, msdu_chaining = 0; + struct sk_buff *msdu; + struct htt_rx_desc *rx_desc; + + if (ath10k_htt_rx_ring_elems(htt) == 0) + ath10k_warn("htt rx ring is empty!\n"); + + if (htt->rx_confused) { + ath10k_warn("htt is confused. refusing rx\n"); + return 0; + } + + msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt); + while (msdu) { + int last_msdu, msdu_len_invalid, msdu_chained; + + dma_unmap_single(htt->ar->dev, + ATH10K_SKB_CB(msdu)->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + + ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ", + msdu->data, msdu->len + skb_tailroom(msdu)); + + rx_desc = (struct htt_rx_desc *)msdu->data; + + /* FIXME: we must report msdu payload since this is what caller + * expects now */ + skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload)); + skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload)); + + /* + * Sanity check - confirm the HW is finished filling in the + * rx data. + * If the HW and SW are working correctly, then it's guaranteed + * that the HW's MAC DMA is done before this point in the SW. 
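+		 * (The RX_ATTENTION_FLAGS_MSDU_DONE bit tested below is
+		 * that completion marker.)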
+		 * To prevent the case that we handle a stale Rx descriptor,
+		 * just assert for now until we have a way to recover.
+		 */
+		if (!(__le32_to_cpu(rx_desc->attention.flags)
+				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
+			ath10k_htt_rx_free_msdu_chain(*head_msdu);
+			*head_msdu = NULL;
+			msdu = NULL;
+			ath10k_err("htt rx stopped. cannot recover\n");
+			htt->rx_confused = true;
+			break;
+		}
+
+		/*
+		 * Copy the FW rx descriptor for this MSDU from the rx
+		 * indication message into the MSDU's netbuf. HL uses the
+		 * same rx indication message definition as LL, and simply
+		 * appends new info (fields from the HW rx desc, and the
+		 * MSDU payload itself). So, the offset into the rx
+		 * indication message only has to account for the standard
+		 * offset of the per-MSDU FW rx desc info within the
+		 * message, and how many bytes of the per-MSDU FW rx desc
+		 * info have already been consumed. (And the endianness of
+		 * the host, since for a big-endian host, the rx ind
+		 * message contents, including the per-MSDU rx desc bytes,
+		 * were byteswapped during upload.)
+		 */
+		if (*fw_desc_len > 0) {
+			rx_desc->fw_desc.info0 = **fw_desc;
+			/*
+			 * The target is expected to only provide the basic
+			 * per-MSDU rx descriptors. Just to be sure, verify
+			 * that the target has not attached extension data
+			 * (e.g. LRO flow ID).
+			 */
+
+			/* or more, if there's extension data */
+			(*fw_desc)++;
+			(*fw_desc_len)--;
+		} else {
+			/*
+			 * When an oversized A-MSDU happens, the FW will lose
+			 * some of the MSDU status - in this case, the FW
+			 * descriptors provided will be fewer than the
+			 * actual MSDUs inside this MPDU. Mark the FW
+			 * descriptors so that the frames will still be
+			 * delivered to the upper stack if there is no CRC
+			 * error for this MPDU.
+			 *
+			 * FIX THIS - the FW descriptors are actually for
+			 * MSDUs at the end of this A-MSDU instead of the
+			 * beginning.
+			 */
+			rx_desc->fw_desc.info0 = 0;
+		}
+
+		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
+					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
+		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+			      RX_MSDU_START_INFO0_MSDU_LENGTH);
+		msdu_chained = rx_desc->frag_info.ring2_more_count;
+
+		if (msdu_len_invalid)
+			msdu_len = 0;
+
+		skb_trim(msdu, 0);
+		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+		msdu_len -= msdu->len;
+
+		/* FIXME: Do chained buffers include htt_rx_desc or not? */
+		while (msdu_chained--) {
+			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
+
+			dma_unmap_single(htt->ar->dev,
+					 ATH10K_SKB_CB(next)->paddr,
+					 next->len + skb_tailroom(next),
+					 DMA_FROM_DEVICE);
+
+			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
+					next->data,
+					next->len + skb_tailroom(next));
+
+			skb_trim(next, 0);
+			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
+			msdu_len -= next->len;
+
+			msdu->next = next;
+			msdu = next;
+			msdu_chaining = 1;
+		}
+
+		if (msdu_len > 0) {
+			/* This may suggest a FW bug */
+			ath10k_warn("htt rx msdu len not consumed (%d)\n",
+				    msdu_len);
+		}
+
+		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+				RX_MSDU_END_INFO0_LAST_MSDU;
+
+		if (last_msdu) {
+			msdu->next = NULL;
+			break;
+		} else {
+			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
+			msdu->next = next;
+			msdu = next;
+		}
+	}
+	*tail_msdu = msdu;
+
+	/*
+	 * Don't refill the ring yet.
+	 *
+	 * First, the elements popped here are still in use - it is not
+	 * safe to overwrite them until the matching call to
+	 * mpdu_desc_list_next.
Second, for efficiency it is preferable to + * refill the rx ring with 1 PPDU's worth of rx buffers (something + * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers + * (something like 3 buffers). Consequently, we'll rely on the txrx + * SW to tell us when it is done pulling all the PPDU's rx buffers + * out of the rx ring, and then refill it just once. + */ + + return msdu_chaining; +} + +int ath10k_htt_rx_attach(struct ath10k_htt *htt) +{ + dma_addr_t paddr; + void *vaddr; + struct timer_list *timer = &htt->rx_ring.refill_retry_timer; + + htt->rx_ring.size = ath10k_htt_rx_ring_size(htt); + if (!is_power_of_2(htt->rx_ring.size)) { + ath10k_warn("htt rx ring size is not power of 2\n"); + return -EINVAL; + } + + htt->rx_ring.size_mask = htt->rx_ring.size - 1; + + /* + * Set the initial value for the level to which the rx ring + * should be filled, based on the max throughput and the + * worst likely latency for the host to fill the rx ring + * with new buffers. In theory, this fill level can be + * dynamically adjusted from the initial value set here, to + * reflect the actual host latency rather than a + * conservative assumption about the host latency. + */ + htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt); + + htt->rx_ring.netbufs_ring = + kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *), + GFP_KERNEL); + if (!htt->rx_ring.netbufs_ring) + goto err_netbuf; + + vaddr = dma_alloc_coherent(htt->ar->dev, + (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)), + &paddr, GFP_DMA); + if (!vaddr) + goto err_dma_ring; + + htt->rx_ring.paddrs_ring = vaddr; + htt->rx_ring.base_paddr = paddr; + + vaddr = dma_alloc_coherent(htt->ar->dev, + sizeof(*htt->rx_ring.alloc_idx.vaddr), + &paddr, GFP_DMA); + if (!vaddr) + goto err_dma_idx; + + htt->rx_ring.alloc_idx.vaddr = vaddr; + htt->rx_ring.alloc_idx.paddr = paddr; + htt->rx_ring.sw_rd_idx.msdu_payld = 0; + *htt->rx_ring.alloc_idx.vaddr = 0; + + /* Initialize the Rx refill retry timer */ + setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt); + + spin_lock_init(&htt->rx_ring.lock); + + htt->rx_ring.fill_cnt = 0; + if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level)) + goto err_fill_ring; + + ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n", + htt->rx_ring.size, htt->rx_ring.fill_level); + return 0; + +err_fill_ring: + ath10k_htt_rx_ring_free(htt); + dma_free_coherent(htt->ar->dev, + sizeof(*htt->rx_ring.alloc_idx.vaddr), + htt->rx_ring.alloc_idx.vaddr, + htt->rx_ring.alloc_idx.paddr); +err_dma_idx: + dma_free_coherent(htt->ar->dev, + (htt->rx_ring.size * + sizeof(htt->rx_ring.paddrs_ring)), + htt->rx_ring.paddrs_ring, + htt->rx_ring.base_paddr); +err_dma_ring: + kfree(htt->rx_ring.netbufs_ring); +err_netbuf: + return -ENOMEM; +} + +static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type) +{ + switch (type) { + case HTT_RX_MPDU_ENCRYPT_WEP40: + case HTT_RX_MPDU_ENCRYPT_WEP104: + return 4; + case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: + case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */ + case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: + case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */ + case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: + return 8; + case HTT_RX_MPDU_ENCRYPT_NONE: + return 0; + } + + ath10k_warn("unknown encryption type %d\n", type); + return 0; +} + +static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type) +{ + switch (type) { + case HTT_RX_MPDU_ENCRYPT_NONE: + case HTT_RX_MPDU_ENCRYPT_WEP40: + case HTT_RX_MPDU_ENCRYPT_WEP104: + case 
HTT_RX_MPDU_ENCRYPT_WEP128: + case HTT_RX_MPDU_ENCRYPT_WAPI: + return 0; + case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: + case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: + return 4; + case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: + return 8; + } + + ath10k_warn("unknown encryption type %d\n", type); + return 0; +} + +/* Applies for first msdu in chain, before altering it. */ +static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb) +{ + struct htt_rx_desc *rxd; + enum rx_msdu_decap_format fmt; + + rxd = (void *)skb->data - sizeof(*rxd); + fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), + RX_MSDU_START_INFO1_DECAP_FORMAT); + + if (fmt == RX_MSDU_DECAP_RAW) + return (void *)skb->data; + else + return (void *)skb->data - RX_HTT_HDR_STATUS_LEN; +} + +/* This function only applies for first msdu in an msdu chain */ +static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr) +{ + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + if (qc[0] & 0x80) + return true; + } + return false; +} + +static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt, + struct htt_rx_info *info) +{ + struct htt_rx_desc *rxd; + struct sk_buff *amsdu; + struct sk_buff *first; + struct ieee80211_hdr *hdr; + struct sk_buff *skb = info->skb; + enum rx_msdu_decap_format fmt; + enum htt_rx_mpdu_encrypt_type enctype; + unsigned int hdr_len; + int crypto_len; + + rxd = (void *)skb->data - sizeof(*rxd); + fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), + RX_MSDU_START_INFO1_DECAP_FORMAT); + enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), + RX_MPDU_START_INFO0_ENCRYPT_TYPE); + + /* FIXME: No idea what assumptions are safe here. Need logs */ + if ((fmt == RX_MSDU_DECAP_RAW && skb->next) || + (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) { + ath10k_htt_rx_free_msdu_chain(skb->next); + skb->next = NULL; + return -ENOTSUPP; + } + + /* A-MSDU max is a little less than 8K */ + amsdu = dev_alloc_skb(8*1024); + if (!amsdu) { + ath10k_warn("A-MSDU allocation failed\n"); + ath10k_htt_rx_free_msdu_chain(skb->next); + skb->next = NULL; + return -ENOMEM; + } + + if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) { + int hdrlen; + + hdr = (void *)rxd->rx_hdr_status; + hdrlen = ieee80211_hdrlen(hdr->frame_control); + memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen); + } + + first = skb; + while (skb) { + void *decap_hdr; + int decap_len = 0; + + rxd = (void *)skb->data - sizeof(*rxd); + fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), + RX_MSDU_START_INFO1_DECAP_FORMAT); + decap_hdr = (void *)rxd->rx_hdr_status; + + if (skb == first) { + /* We receive linked A-MSDU subframe skbuffs. The + * first one contains the original 802.11 header (and + * possible crypto param) in the RX descriptor. The + * A-MSDU subframe header follows that. Each part is + * aligned to 4 byte boundary. */ + + hdr = (void *)amsdu->data; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(enctype); + + decap_hdr += roundup(hdr_len, 4); + decap_hdr += roundup(crypto_len, 4); + } + + if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) { + /* Ethernet2 decap inserts ethernet header in place of + * A-MSDU subframe header. */ + skb_pull(skb, 6 + 6 + 2); + + /* A-MSDU subframe header length */ + decap_len += 6 + 6 + 2; + + /* Ethernet2 decap also strips the LLC/SNAP so we need + * to re-insert it. The LLC/SNAP follows A-MSDU + * subframe header. 
*/
+			/* FIXME: Not all LLCs are 8 bytes long */
+			decap_len += 8;
+
+			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+		}
+
+		if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
+			/* Native Wifi decap inserts regular 802.11 header
+			 * in place of A-MSDU subframe header. */
+			hdr = (struct ieee80211_hdr *)skb->data;
+			skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+
+			/* A-MSDU subframe header length */
+			decap_len += 6 + 6 + 2;
+
+			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+		}
+
+		if (fmt == RX_MSDU_DECAP_RAW)
+			skb_trim(skb, skb->len - 4); /* remove FCS */
+
+		memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
+
+		/* A-MSDU subframes are padded to 4 bytes
+		 * but relative to first subframe, not the whole MPDU */
+		if (skb->next && ((decap_len + skb->len) & 3)) {
+			int padlen = 4 - ((decap_len + skb->len) & 3);
+			memset(skb_put(amsdu, padlen), 0, padlen);
+		}
+
+		skb = skb->next;
+	}
+
+	info->skb = amsdu;
+	info->encrypt_type = enctype;
+
+	ath10k_htt_rx_free_msdu_chain(first);
+
+	return 0;
+}
+
+static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+{
+	struct sk_buff *skb = info->skb;
+	struct htt_rx_desc *rxd;
+	struct ieee80211_hdr *hdr;
+	enum rx_msdu_decap_format fmt;
+	enum htt_rx_mpdu_encrypt_type enctype;
+
+	/* This shouldn't happen. If it does then it may be a FW bug. */
+	if (skb->next) {
+		ath10k_warn("received chained non A-MSDU frame\n");
+		ath10k_htt_rx_free_msdu_chain(skb->next);
+		skb->next = NULL;
+	}
+
+	rxd = (void *)skb->data - sizeof(*rxd);
+	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+		 RX_MSDU_START_INFO1_DECAP_FORMAT);
+	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+	hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+
+	switch (fmt) {
+	case RX_MSDU_DECAP_RAW:
+		/* remove trailing FCS */
+		skb_trim(skb, skb->len - 4);
+		break;
+	case RX_MSDU_DECAP_NATIVE_WIFI:
+		/* nothing to do here */
+		break;
+	case RX_MSDU_DECAP_ETHERNET2_DIX:
+		/* macaddr[6] + macaddr[6] + ethertype[2] */
+		skb_pull(skb, 6 + 6 + 2);
+		break;
+	case RX_MSDU_DECAP_8023_SNAP_LLC:
+		/* macaddr[6] + macaddr[6] + len[2] */
+		/* we don't need this for non-A-MSDU */
+		skb_pull(skb, 6 + 6 + 2);
+		break;
+	}
+
+	if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
+		void *llc;
+		int llclen;
+
+		llclen = 8;
+		llc = hdr;
+		llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
+		llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
+
+		skb_push(skb, llclen);
+		memcpy(skb->data, llc, llclen);
+	}
+
+	if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
+		int len = ieee80211_hdrlen(hdr->frame_control);
+		skb_push(skb, len);
+		memcpy(skb->data, hdr, len);
+	}
+
+	info->skb = skb;
+	info->encrypt_type = enctype;
+	return 0;
+}
+
+static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
+{
+	struct htt_rx_desc *rxd;
+	u32 flags;
+
+	rxd = (void *)skb->data - sizeof(*rxd);
+	flags = __le32_to_cpu(rxd->attention.flags);
+
+	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
+		return true;
+
+	return false;
+}
+
+static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
+{
+	struct htt_rx_desc *rxd;
+	u32 flags;
+
+	rxd = (void *)skb->data - sizeof(*rxd);
+	flags = __le32_to_cpu(rxd->attention.flags);
+
+	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
+		return true;
+
+	return false;
+}
+
+static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
+				  struct htt_rx_indication *rx)
+{
+	struct htt_rx_info info;
+	struct htt_rx_indication_mpdu_range *mpdu_ranges;
+	struct ieee80211_hdr *hdr;
+	int num_mpdu_ranges;
+	int fw_desc_len;
+	u8
*fw_desc; + int i, j; + int ret; + + memset(&info, 0, sizeof(info)); + + fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); + fw_desc = (u8 *)&rx->fw_desc; + + num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), + HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); + mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); + + ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", + rx, sizeof(*rx) + + (sizeof(struct htt_rx_indication_mpdu_range) * + num_mpdu_ranges)); + + for (i = 0; i < num_mpdu_ranges; i++) { + info.status = mpdu_ranges[i].mpdu_range_status; + + for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) { + struct sk_buff *msdu_head, *msdu_tail; + enum htt_rx_mpdu_status status; + int msdu_chaining; + + msdu_head = NULL; + msdu_tail = NULL; + msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, + &fw_desc, + &fw_desc_len, + &msdu_head, + &msdu_tail); + + if (!msdu_head) { + ath10k_warn("htt rx no data!\n"); + continue; + } + + if (msdu_head->len == 0) { + ath10k_dbg(ATH10K_DBG_HTT, + "htt rx dropping due to zero-len\n"); + ath10k_htt_rx_free_msdu_chain(msdu_head); + continue; + } + + if (ath10k_htt_rx_has_decrypt_err(msdu_head)) { + ath10k_htt_rx_free_msdu_chain(msdu_head); + continue; + } + + status = info.status; + + /* Skip mgmt frames while we handle this in WMI */ + if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) { + ath10k_htt_rx_free_msdu_chain(msdu_head); + continue; + } + + if (status != HTT_RX_IND_MPDU_STATUS_OK && + status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && + !htt->ar->monitor_enabled) { + ath10k_dbg(ATH10K_DBG_HTT, + "htt rx ignoring frame w/ status %d\n", + status); + ath10k_htt_rx_free_msdu_chain(msdu_head); + continue; + } + + /* FIXME: we do not support chaining yet. + * this needs investigation */ + if (msdu_chaining) { + ath10k_warn("msdu_chaining is true\n"); + ath10k_htt_rx_free_msdu_chain(msdu_head); + continue; + } + + info.skb = msdu_head; + info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head); + info.signal = ATH10K_DEFAULT_NOISE_FLOOR; + info.signal += rx->ppdu.combined_rssi; + + info.rate.info0 = rx->ppdu.info0; + info.rate.info1 = __le32_to_cpu(rx->ppdu.info1); + info.rate.info2 = __le32_to_cpu(rx->ppdu.info2); + + hdr = ath10k_htt_rx_skb_get_hdr(msdu_head); + + if (ath10k_htt_rx_hdr_is_amsdu(hdr)) + ret = ath10k_htt_rx_amsdu(htt, &info); + else + ret = ath10k_htt_rx_msdu(htt, &info); + + if (ret && !info.fcs_err) { + ath10k_warn("error processing msdus %d\n", ret); + dev_kfree_skb_any(info.skb); + continue; + } + + if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data)) + ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n"); + + ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ", + info.skb->data, info.skb->len); + ath10k_process_rx(htt->ar, &info); + } + } + + ath10k_htt_rx_msdu_buff_replenish(htt); +} + +static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, + struct htt_rx_fragment_indication *frag) +{ + struct sk_buff *msdu_head, *msdu_tail; + struct htt_rx_desc *rxd; + enum rx_msdu_decap_format fmt; + struct htt_rx_info info = {}; + struct ieee80211_hdr *hdr; + int msdu_chaining; + bool tkip_mic_err; + bool decrypt_err; + u8 *fw_desc; + int fw_desc_len, hdrlen, paramlen; + int trim; + + fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes); + fw_desc = (u8 *)frag->fw_msdu_rx_desc; + + msdu_head = NULL; + msdu_tail = NULL; + msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, + &msdu_head, &msdu_tail); + + ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); + + if (!msdu_head) { + ath10k_warn("htt rx frag no data\n"); + return; + } + 
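+	/* A fragment is expected to arrive as a single, unchained MSDU;
+	 * anything else is treated as a firmware anomaly and dropped. */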
+	if (msdu_chaining || msdu_head != msdu_tail) {
+		ath10k_warn("aggregation with fragmentation?!\n");
+		ath10k_htt_rx_free_msdu_chain(msdu_head);
+		return;
+	}
+
+	/* FIXME: implement signal strength */
+
+	hdr = (struct ieee80211_hdr *)msdu_head->data;
+	rxd = (void *)msdu_head->data - sizeof(*rxd);
+	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
+				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
+				RX_ATTENTION_FLAGS_DECRYPT_ERR);
+	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+		 RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+	if (fmt != RX_MSDU_DECAP_RAW) {
+		ath10k_warn("we don't support non-raw fragmented rx yet\n");
+		dev_kfree_skb_any(msdu_head);
+		goto end;
+	}
+
+	info.skb = msdu_head;
+	info.status = HTT_RX_IND_MPDU_STATUS_OK;
+	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+			       RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+	if (tkip_mic_err) {
+		ath10k_warn("tkip mic error\n");
+		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
+	}
+
+	if (decrypt_err) {
+		ath10k_warn("decryption err in fragmented rx\n");
+		dev_kfree_skb_any(info.skb);
+		goto end;
+	}
+
+	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+		hdrlen = ieee80211_hdrlen(hdr->frame_control);
+		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
+
+		/* It is more efficient to move the header than the payload */
+		memmove((void *)info.skb->data + paramlen,
+			(void *)info.skb->data,
+			hdrlen);
+		skb_pull(info.skb, paramlen);
+		hdr = (struct ieee80211_hdr *)info.skb->data;
+	}
+
+	/* remove trailing FCS */
+	trim = 4;
+
+	/* remove crypto trailer */
+	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
+
+	/* last fragment of TKIP frags has MIC */
+	if (!ieee80211_has_morefrags(hdr->frame_control) &&
+	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+		trim += 8;
+
+	if (trim > info.skb->len) {
+		ath10k_warn("htt rx fragment: trailer longer than the frame itself?
drop\n"); + dev_kfree_skb_any(info.skb); + goto end; + } + + skb_trim(info.skb, info.skb->len - trim); + + ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ", + info.skb->data, info.skb->len); + ath10k_process_rx(htt->ar, &info); + +end: + if (fw_desc_len > 0) { + ath10k_dbg(ATH10K_DBG_HTT, + "expecting more fragmented rx in one indication %d\n", + fw_desc_len); + } +} + +void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_htt *htt = ar->htt; + struct htt_resp *resp = (struct htt_resp *)skb->data; + + /* confirm alignment */ + if (!IS_ALIGNED((unsigned long)skb->data, 4)) + ath10k_warn("unaligned htt message, expect trouble\n"); + + ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n", + resp->hdr.msg_type); + switch (resp->hdr.msg_type) { + case HTT_T2H_MSG_TYPE_VERSION_CONF: { + htt->target_version_major = resp->ver_resp.major; + htt->target_version_minor = resp->ver_resp.minor; + complete(&htt->target_version_received); + break; + } + case HTT_T2H_MSG_TYPE_RX_IND: { + ath10k_htt_rx_handler(htt, &resp->rx_ind); + break; + } + case HTT_T2H_MSG_TYPE_PEER_MAP: { + struct htt_peer_map_event ev = { + .vdev_id = resp->peer_map.vdev_id, + .peer_id = __le16_to_cpu(resp->peer_map.peer_id), + }; + memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr)); + ath10k_peer_map_event(htt, &ev); + break; + } + case HTT_T2H_MSG_TYPE_PEER_UNMAP: { + struct htt_peer_unmap_event ev = { + .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id), + }; + ath10k_peer_unmap_event(htt, &ev); + break; + } + case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { + struct htt_tx_done tx_done = {}; + int status = __le32_to_cpu(resp->mgmt_tx_completion.status); + + tx_done.msdu_id = + __le32_to_cpu(resp->mgmt_tx_completion.desc_id); + + switch (status) { + case HTT_MGMT_TX_STATUS_OK: + break; + case HTT_MGMT_TX_STATUS_RETRY: + tx_done.no_ack = true; + break; + case HTT_MGMT_TX_STATUS_DROP: + tx_done.discard = true; + break; + } + + ath10k_txrx_tx_completed(htt, &tx_done); + break; + } + case HTT_T2H_MSG_TYPE_TX_COMPL_IND: { + struct htt_tx_done tx_done = {}; + int status = MS(resp->data_tx_completion.flags, + HTT_DATA_TX_STATUS); + __le16 msdu_id; + int i; + + switch (status) { + case HTT_DATA_TX_STATUS_NO_ACK: + tx_done.no_ack = true; + break; + case HTT_DATA_TX_STATUS_OK: + break; + case HTT_DATA_TX_STATUS_DISCARD: + case HTT_DATA_TX_STATUS_POSTPONE: + case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: + tx_done.discard = true; + break; + default: + ath10k_warn("unhandled tx completion status %d\n", + status); + tx_done.discard = true; + break; + } + + ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", + resp->data_tx_completion.num_msdus); + + for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { + msdu_id = resp->data_tx_completion.msdus[i]; + tx_done.msdu_id = __le16_to_cpu(msdu_id); + ath10k_txrx_tx_completed(htt, &tx_done); + } + break; + } + case HTT_T2H_MSG_TYPE_SEC_IND: { + struct ath10k *ar = htt->ar; + struct htt_security_indication *ev = &resp->security_indication; + + ath10k_dbg(ATH10K_DBG_HTT, + "sec ind peer_id %d unicast %d type %d\n", + __le16_to_cpu(ev->peer_id), + !!(ev->flags & HTT_SECURITY_IS_UNICAST), + MS(ev->flags, HTT_SECURITY_TYPE)); + complete(&ar->install_key_done); + break; + } + case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { + ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", + skb->data, skb->len); + ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind); + break; + } + case HTT_T2H_MSG_TYPE_TEST: + /* FIX THIS */ + break; + case 
HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+	case HTT_T2H_MSG_TYPE_STATS_CONF:
+	case HTT_T2H_MSG_TYPE_RX_ADDBA:
+	case HTT_T2H_MSG_TYPE_RX_DELBA:
+	case HTT_T2H_MSG_TYPE_RX_FLUSH:
+	default:
+		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
+			   resp->hdr.msg_type);
+		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+				skb->data, skb->len);
+		break;
+	}
+
+	/* Free the indication buffer */
+	dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
new file mode 100644
index 000000000000..ef79106db247
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -0,0 +1,510 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "htt.h"
+#include "mac.h"
+#include "hif.h"
+#include "txrx.h"
+#include "debug.h"
+
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+	htt->num_pending_tx--;
+	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
+		ieee80211_wake_queues(htt->ar->hw);
+}
+
+static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+	spin_lock_bh(&htt->tx_lock);
+	__ath10k_htt_tx_dec_pending(htt);
+	spin_unlock_bh(&htt->tx_lock);
+}
+
+static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+{
+	int ret = 0;
+
+	spin_lock_bh(&htt->tx_lock);
+
+	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	htt->num_pending_tx++;
+	if (htt->num_pending_tx == htt->max_num_pending_tx)
+		ieee80211_stop_queues(htt->ar->hw);
+
+exit:
+	spin_unlock_bh(&htt->tx_lock);
+	return ret;
+}
+
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+{
+	int msdu_id;
+
+	lockdep_assert_held(&htt->tx_lock);
+
+	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
+				      htt->max_num_pending_tx);
+	if (msdu_id == htt->max_num_pending_tx)
+		return -ENOBUFS;
+
+	ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
+	__set_bit(msdu_id, htt->used_msdu_ids);
+	return msdu_id;
+}
+
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
+{
+	lockdep_assert_held(&htt->tx_lock);
+
+	if (!test_bit(msdu_id, htt->used_msdu_ids))
+		ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);
+
+	ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+	__clear_bit(msdu_id, htt->used_msdu_ids);
+}
+
+int ath10k_htt_tx_attach(struct ath10k_htt *htt)
+{
+	u8 pipe;
+
+	spin_lock_init(&htt->tx_lock);
+	init_waitqueue_head(&htt->empty_tx_wq);
+
+	/* At the beginning the free queue number should hint at the
+	 * maximum queue length */
+	pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
+	htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
+								   pipe);
+
+	ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx
%d\n", + htt->max_num_pending_tx); + + htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * + htt->max_num_pending_tx, GFP_KERNEL); + if (!htt->pending_tx) + return -ENOMEM; + + htt->used_msdu_ids = kzalloc(sizeof(unsigned long) * + BITS_TO_LONGS(htt->max_num_pending_tx), + GFP_KERNEL); + if (!htt->used_msdu_ids) { + kfree(htt->pending_tx); + return -ENOMEM; + } + + return 0; +} + +static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) +{ + struct sk_buff *txdesc; + int msdu_id; + + /* No locks needed. Called after communication with the device has + * been stopped. */ + + for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) { + if (!test_bit(msdu_id, htt->used_msdu_ids)) + continue; + + txdesc = htt->pending_tx[msdu_id]; + if (!txdesc) + continue; + + ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", + msdu_id); + + if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) + ATH10K_SKB_CB(txdesc)->htt.refcount = 1; + + ATH10K_SKB_CB(txdesc)->htt.discard = true; + ath10k_txrx_tx_unref(htt, txdesc); + } +} + +void ath10k_htt_tx_detach(struct ath10k_htt *htt) +{ + ath10k_htt_tx_cleanup_pending(htt); + kfree(htt->pending_tx); + kfree(htt->used_msdu_ids); + return; +} + +void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); + struct ath10k_htt *htt = ar->htt; + + if (skb_cb->htt.is_conf) { + dev_kfree_skb_any(skb); + return; + } + + if (skb_cb->is_aborted) { + skb_cb->htt.discard = true; + + /* if the skbuff is aborted we need to make sure we'll free up + * the tx resources, we can't simply run tx_unref() 2 times + * because if htt tx completion came in earlier we'd access + * unallocated memory */ + if (skb_cb->htt.refcount > 1) + skb_cb->htt.refcount = 1; + } + + ath10k_txrx_tx_unref(htt, skb); +} + +int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) +{ + struct sk_buff *skb; + struct htt_cmd *cmd; + int len = 0; + int ret; + + len += sizeof(cmd->hdr); + len += sizeof(cmd->ver_req); + + skb = ath10k_htc_alloc_skb(len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ; + + ATH10K_SKB_CB(skb)->htt.is_conf = true; + + ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) +{ + struct sk_buff *skb; + struct htt_cmd *cmd; + struct htt_rx_ring_setup_ring *ring; + const int num_rx_ring = 1; + u16 flags; + u32 fw_idx; + int len; + int ret; + + /* + * the HW expects the buffer to be an integral number of 4-byte + * "words" + */ + BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); + BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); + + len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr) + + (sizeof(*ring) * num_rx_ring); + skb = ath10k_htc_alloc_skb(len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + + cmd = (struct htt_cmd *)skb->data; + ring = &cmd->rx_setup.rings[0]; + + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; + cmd->rx_setup.hdr.num_rings = 1; + + /* FIXME: do we need all of this? 
*/ + flags = 0; + flags |= HTT_RX_RING_FLAGS_MAC80211_HDR; + flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; + flags |= HTT_RX_RING_FLAGS_PPDU_START; + flags |= HTT_RX_RING_FLAGS_PPDU_END; + flags |= HTT_RX_RING_FLAGS_MPDU_START; + flags |= HTT_RX_RING_FLAGS_MPDU_END; + flags |= HTT_RX_RING_FLAGS_MSDU_START; + flags |= HTT_RX_RING_FLAGS_MSDU_END; + flags |= HTT_RX_RING_FLAGS_RX_ATTENTION; + flags |= HTT_RX_RING_FLAGS_FRAG_INFO; + flags |= HTT_RX_RING_FLAGS_UNICAST_RX; + flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; + flags |= HTT_RX_RING_FLAGS_CTRL_RX; + flags |= HTT_RX_RING_FLAGS_MGMT_RX; + flags |= HTT_RX_RING_FLAGS_NULL_RX; + flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX; + + fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); + + ring->fw_idx_shadow_reg_paddr = + __cpu_to_le32(htt->rx_ring.alloc_idx.paddr); + ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr); + ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); + ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); + ring->flags = __cpu_to_le16(flags); + ring->fw_idx_init_val = __cpu_to_le16(fw_idx); + +#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4) + + ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); + ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); + ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); + ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); + ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); + ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); + ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); + ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); + ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); + ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); + +#undef desc_offset + + ATH10K_SKB_CB(skb)->htt.is_conf = true; + + ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) +{ + struct device *dev = htt->ar->dev; + struct ath10k_skb_cb *skb_cb; + struct sk_buff *txdesc = NULL; + struct htt_cmd *cmd; + u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; + int len = 0; + int msdu_id = -1; + int res; + + + res = ath10k_htt_tx_inc_pending(htt); + if (res) + return res; + + len += sizeof(cmd->hdr); + len += sizeof(cmd->mgmt_tx); + + txdesc = ath10k_htc_alloc_skb(len); + if (!txdesc) { + res = -ENOMEM; + goto err; + } + + spin_lock_bh(&htt->tx_lock); + msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); + if (msdu_id < 0) { + spin_unlock_bh(&htt->tx_lock); + res = msdu_id; + goto err; + } + htt->pending_tx[msdu_id] = txdesc; + spin_unlock_bh(&htt->tx_lock); + + res = ath10k_skb_map(dev, msdu); + if (res) + goto err; + + skb_put(txdesc, len); + cmd = (struct htt_cmd *)txdesc->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; + cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); + cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); + cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); + cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); + memcpy(cmd->mgmt_tx.hdr, msdu->data, + min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); + + /* refcount is decremented by HTC and HTT completions until it reaches + * zero and is freed */ + skb_cb = ATH10K_SKB_CB(txdesc); + skb_cb->htt.msdu_id = msdu_id; + skb_cb->htt.refcount = 2; + skb_cb->htt.msdu = msdu; + + res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc); + if (res) + goto err; 
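+
+	/* On success the txdesc is handed to HTC; both it and the
+	 * DMA-mapped msdu are released later via the tx completion path. */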
+ + return 0; + +err: + ath10k_skb_unmap(dev, msdu); + + if (txdesc) + dev_kfree_skb_any(txdesc); + if (msdu_id >= 0) { + spin_lock_bh(&htt->tx_lock); + htt->pending_tx[msdu_id] = NULL; + ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); + } + ath10k_htt_tx_dec_pending(htt); + return res; +} + +int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) +{ + struct device *dev = htt->ar->dev; + struct htt_cmd *cmd; + struct htt_data_tx_desc_frag *tx_frags; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; + struct ath10k_skb_cb *skb_cb; + struct sk_buff *txdesc = NULL; + struct sk_buff *txfrag = NULL; + u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; + u8 tid; + int prefetch_len, desc_len, frag_len; + dma_addr_t frags_paddr; + int msdu_id = -1; + int res; + u8 flags0; + u16 flags1; + + res = ath10k_htt_tx_inc_pending(htt); + if (res) + return res; + + prefetch_len = min(htt->prefetch_len, msdu->len); + prefetch_len = roundup(prefetch_len, 4); + + desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len; + frag_len = sizeof(*tx_frags) * 2; + + txdesc = ath10k_htc_alloc_skb(desc_len); + if (!txdesc) { + res = -ENOMEM; + goto err; + } + + txfrag = dev_alloc_skb(frag_len); + if (!txfrag) { + res = -ENOMEM; + goto err; + } + + if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) { + ath10k_warn("htt alignment check failed. dropping packet.\n"); + res = -EIO; + goto err; + } + + spin_lock_bh(&htt->tx_lock); + msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); + if (msdu_id < 0) { + spin_unlock_bh(&htt->tx_lock); + res = msdu_id; + goto err; + } + htt->pending_tx[msdu_id] = txdesc; + spin_unlock_bh(&htt->tx_lock); + + res = ath10k_skb_map(dev, msdu); + if (res) + goto err; + + /* tx fragment list must be terminated with zero-entry */ + skb_put(txfrag, frag_len); + tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data; + tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); + tx_frags[0].len = __cpu_to_le32(msdu->len); + tx_frags[1].paddr = __cpu_to_le32(0); + tx_frags[1].len = __cpu_to_le32(0); + + res = ath10k_skb_map(dev, txfrag); + if (res) + goto err; + + ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n", + (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr, + (unsigned long long) ATH10K_SKB_CB(msdu)->paddr); + ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ", + txfrag->data, frag_len); + ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ", + msdu->data, msdu->len); + + skb_put(txdesc, desc_len); + cmd = (struct htt_cmd *)txdesc->data; + memset(cmd, 0, desc_len); + + tid = ATH10K_SKB_CB(msdu)->htt.tid; + + ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid); + + flags0 = 0; + if (!ieee80211_has_protected(hdr->frame_control)) + flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; + flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; + flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, + HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + + flags1 = 0; + flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); + flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); + + frags_paddr = ATH10K_SKB_CB(txfrag)->paddr; + + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; + cmd->data_tx.flags0 = flags0; + cmd->data_tx.flags1 = __cpu_to_le16(flags1); + cmd->data_tx.len = __cpu_to_le16(msdu->len); + cmd->data_tx.id = __cpu_to_le16(msdu_id); + cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr); + cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); + + memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len); + + /* refcount is decremented by HTC and HTT 
completions until it reaches
+	 * zero and is freed */
+	skb_cb = ATH10K_SKB_CB(txdesc);
+	skb_cb->htt.msdu_id = msdu_id;
+	skb_cb->htt.refcount = 2;
+	skb_cb->htt.txfrag = txfrag;
+	skb_cb->htt.msdu = msdu;
+
+	res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+	if (res)
+		goto err;
+
+	return 0;
+err:
+	if (txfrag)
+		ath10k_skb_unmap(dev, txfrag);
+	if (txdesc)
+		dev_kfree_skb_any(txdesc);
+	if (txfrag)
+		dev_kfree_skb_any(txfrag);
+	if (msdu_id >= 0) {
+		spin_lock_bh(&htt->tx_lock);
+		htt->pending_tx[msdu_id] = NULL;
+		ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+		spin_unlock_bh(&htt->tx_lock);
+	}
+	ath10k_htt_tx_dec_pending(htt);
+	ath10k_skb_unmap(dev, msdu);
+	return res;
+}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
new file mode 100644
index 000000000000..44ed5af0a204
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HW_H_
+#define _HW_H_
+
+#include "targaddrs.h"
+
+/* Supported FW version */
+#define SUPPORTED_FW_MAJOR	1
+#define SUPPORTED_FW_MINOR	0
+#define SUPPORTED_FW_RELEASE	0
+#define SUPPORTED_FW_BUILD	629
+
+/* QCA988X 1.0 definitions */
+#define QCA988X_HW_1_0_VERSION		0x4000002c
+#define QCA988X_HW_1_0_FW_DIR		"ath10k/QCA988X/hw1.0"
+#define QCA988X_HW_1_0_FW_FILE		"firmware.bin"
+#define QCA988X_HW_1_0_OTP_FILE		"otp.bin"
+#define QCA988X_HW_1_0_BOARD_DATA_FILE	"board.bin"
+#define QCA988X_HW_1_0_PATCH_LOAD_ADDR	0x1234
+
+/* QCA988X 2.0 definitions */
+#define QCA988X_HW_2_0_VERSION		0x4100016c
+#define QCA988X_HW_2_0_FW_DIR		"ath10k/QCA988X/hw2.0"
+#define QCA988X_HW_2_0_FW_FILE		"firmware.bin"
+#define QCA988X_HW_2_0_OTP_FILE		"otp.bin"
+#define QCA988X_HW_2_0_BOARD_DATA_FILE	"board.bin"
+#define QCA988X_HW_2_0_PATCH_LOAD_ADDR	0x1234
+
+/* Known peculiarities:
+ *  - current FW doesn't support raw rx mode (last tested v599)
+ *  - current FW dumps upon raw tx mode (last tested v599)
+ *  - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
+ *  - raw frames have an FCS, nwifi frames don't
+ *  - ethernet frames have the 802.11 header decapped and parts (base hdr,
+ *    cipher param, llc/snap) are aligned to 4-byte boundaries each */
+enum ath10k_hw_txrx_mode {
+	ATH10K_HW_TXRX_RAW = 0,
+	ATH10K_HW_TXRX_NATIVE_WIFI = 1,
+	ATH10K_HW_TXRX_ETHERNET = 2,
+};
+
+enum ath10k_mcast2ucast_mode {
+	ATH10K_MCAST2UCAST_DISABLED = 0,
+	ATH10K_MCAST2UCAST_ENABLED = 1,
+};
+
+#define TARGET_NUM_VDEVS			8
+#define TARGET_NUM_PEER_AST			2
+#define TARGET_NUM_WDS_ENTRIES			32
+#define TARGET_DMA_BURST_SIZE			0
+#define TARGET_MAC_AGGR_DELIM			0
+#define TARGET_AST_SKID_LIMIT			16
+#define TARGET_NUM_PEERS			16
+#define TARGET_NUM_OFFLOAD_PEERS		0
+#define TARGET_NUM_OFFLOAD_REORDER_BUFS		0
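+
+/* Note: TARGET_NUM_TIDS below derives from the peer and vdev counts
+ * above: two TIDs per peer plus two per vdev. */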
+#define TARGET_NUM_PEER_KEYS 2 +#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS))) +#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) +#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) +#define TARGET_RX_TIMEOUT_LO_PRI 100 +#define TARGET_RX_TIMEOUT_HI_PRI 40 +#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET +#define TARGET_SCAN_MAX_PENDING_REQS 4 +#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3 +#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3 +#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8 +#define TARGET_GTK_OFFLOAD_MAX_VDEV 3 +#define TARGET_NUM_MCAST_GROUPS 0 +#define TARGET_NUM_MCAST_TABLE_ELEMS 0 +#define TARGET_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED +#define TARGET_TX_DBG_LOG_SIZE 1024 +#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0 +#define TARGET_VOW_CONFIG 0 +#define TARGET_NUM_MSDU_DESC (1024 + 400) +#define TARGET_MAX_FRAG_ENTRIES 0 + + +/* Number of Copy Engines supported */ +#define CE_COUNT 8 + +/* + * Total number of PCIe MSI interrupts requested for all interrupt sources. + * PCIe standard forces this to be a power of 2. + * Some Host OS's limit MSI requests that can be granted to 8 + * so for now we abide by this limit and avoid requesting more + * than that. + */ +#define MSI_NUM_REQUEST_LOG2 3 +#define MSI_NUM_REQUEST (1<<MSI_NUM_REQUEST_LOG2) + +/* + * Granted MSIs are assigned as follows: + * Firmware uses the first + * Remaining MSIs, if any, are used by Copy Engines + * This mapping is known to both Target firmware and Host software. + * It may be changed as long as Host and Target are kept in sync. + */ +/* MSI for firmware (errors, etc.) */ +#define MSI_ASSIGN_FW 0 + +/* MSIs for Copy Engines */ +#define MSI_ASSIGN_CE_INITIAL 1 +#define MSI_ASSIGN_CE_MAX 7 + +/* as of IP3.7.1 */ +#define RTC_STATE_V_ON 3 + +#define RTC_STATE_COLD_RESET_MASK 0x00000400 +#define RTC_STATE_V_LSB 0 +#define RTC_STATE_V_MASK 0x00000007 +#define RTC_STATE_ADDRESS 0x0000 +#define PCIE_SOC_WAKE_V_MASK 0x00000001 +#define PCIE_SOC_WAKE_ADDRESS 0x0004 +#define PCIE_SOC_WAKE_RESET 0x00000000 +#define SOC_GLOBAL_RESET_ADDRESS 0x0008 + +#define RTC_SOC_BASE_ADDRESS 0x00004000 +#define RTC_WMAC_BASE_ADDRESS 0x00005000 +#define MAC_COEX_BASE_ADDRESS 0x00006000 +#define BT_COEX_BASE_ADDRESS 0x00007000 +#define SOC_PCIE_BASE_ADDRESS 0x00008000 +#define SOC_CORE_BASE_ADDRESS 0x00009000 +#define WLAN_UART_BASE_ADDRESS 0x0000c000 +#define WLAN_SI_BASE_ADDRESS 0x00010000 +#define WLAN_GPIO_BASE_ADDRESS 0x00014000 +#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000 +#define WLAN_MAC_BASE_ADDRESS 0x00020000 +#define EFUSE_BASE_ADDRESS 0x00030000 +#define FPGA_REG_BASE_ADDRESS 0x00039000 +#define WLAN_UART2_BASE_ADDRESS 0x00054c00 +#define CE_WRAPPER_BASE_ADDRESS 0x00057000 +#define CE0_BASE_ADDRESS 0x00057400 +#define CE1_BASE_ADDRESS 0x00057800 +#define CE2_BASE_ADDRESS 0x00057c00 +#define CE3_BASE_ADDRESS 0x00058000 +#define CE4_BASE_ADDRESS 0x00058400 +#define CE5_BASE_ADDRESS 0x00058800 +#define CE6_BASE_ADDRESS 0x00058c00 +#define CE7_BASE_ADDRESS 0x00059000 +#define DBI_BASE_ADDRESS 0x00060000 +#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 +#define PCIE_LOCAL_BASE_ADDRESS 0x00080000 + +#define SOC_RESET_CONTROL_OFFSET 0x00000000 +#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 +#define SOC_CPU_CLOCK_OFFSET 0x00000020 +#define SOC_CPU_CLOCK_STANDARD_LSB 0 +#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +#define SOC_CLOCK_CONTROL_OFFSET 0x00000028 +#define SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001 +#define SOC_SYSTEM_SLEEP_OFFSET 0x000000c4 +#define 
SOC_LPO_CAL_OFFSET 0x000000e0 +#define SOC_LPO_CAL_ENABLE_LSB 20 +#define SOC_LPO_CAL_ENABLE_MASK 0x00100000 + +#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 +#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 + +#define WLAN_GPIO_PIN0_ADDRESS 0x00000028 +#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c +#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 +#define WLAN_GPIO_PIN10_ADDRESS 0x00000050 +#define WLAN_GPIO_PIN11_ADDRESS 0x00000054 +#define WLAN_GPIO_PIN12_ADDRESS 0x00000058 +#define WLAN_GPIO_PIN13_ADDRESS 0x0000005c + +#define CLOCK_GPIO_OFFSET 0xffffffff +#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 + +#define SI_CONFIG_OFFSET 0x00000000 +#define SI_CONFIG_BIDIR_OD_DATA_LSB 18 +#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 +#define SI_CONFIG_I2C_LSB 16 +#define SI_CONFIG_I2C_MASK 0x00010000 +#define SI_CONFIG_POS_SAMPLE_LSB 7 +#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080 +#define SI_CONFIG_INACTIVE_DATA_LSB 5 +#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020 +#define SI_CONFIG_INACTIVE_CLK_LSB 4 +#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010 +#define SI_CONFIG_DIVIDER_LSB 0 +#define SI_CONFIG_DIVIDER_MASK 0x0000000f +#define SI_CS_OFFSET 0x00000004 +#define SI_CS_DONE_ERR_MASK 0x00000400 +#define SI_CS_DONE_INT_MASK 0x00000200 +#define SI_CS_START_LSB 8 +#define SI_CS_START_MASK 0x00000100 +#define SI_CS_RX_CNT_LSB 4 +#define SI_CS_RX_CNT_MASK 0x000000f0 +#define SI_CS_TX_CNT_LSB 0 +#define SI_CS_TX_CNT_MASK 0x0000000f + +#define SI_TX_DATA0_OFFSET 0x00000008 +#define SI_TX_DATA1_OFFSET 0x0000000c +#define SI_RX_DATA0_OFFSET 0x00000010 +#define SI_RX_DATA1_OFFSET 0x00000014 + +#define CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define CORE_CTRL_ADDRESS 0x0000 +#define PCIE_INTR_ENABLE_ADDRESS 0x0008 +#define PCIE_INTR_CLR_ADDRESS 0x0014 +#define SCRATCH_3_ADDRESS 0x0030 + +/* Firmware indications to the Host via SCRATCH_3 register. 
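 * As a rough illustration (assumed host-side usage, not code from this
 * patch; only ioread32() and the FW_IND_* flags below are taken as given):
 *
 *	u32 ind = ioread32(mem + FW_INDICATOR_ADDRESS);
 *	if (ind & FW_IND_EVENT_PENDING)
 *		service_fw_event();	(hypothetical helper)
 *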
*/ +#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS) +#define FW_IND_EVENT_PENDING 1 +#define FW_IND_INITIALIZED 2 + +/* HOST_REG interrupt from firmware */ +#define PCIE_INTR_FIRMWARE_MASK 0x00000400 +#define PCIE_INTR_CE_MASK_ALL 0x0007f800 + +#define DRAM_BASE_ADDRESS 0x00400000 + +#define MISSING 0 + +#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_MBOX_RST_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS +#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK +#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET +#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING + +#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) + +#endif /* _HW_H_ 
*/ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c new file mode 100644 index 000000000000..3446c989d6a6 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -0,0 +1,3066 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mac.h" + +#include <net/mac80211.h> +#include <linux/etherdevice.h> + +#include "core.h" +#include "debug.h" +#include "wmi.h" +#include "htt.h" +#include "txrx.h" + +/**********/ +/* Crypto */ +/**********/ + +static int ath10k_send_key(struct ath10k_vif *arvif, + struct ieee80211_key_conf *key, + enum set_key_cmd cmd, + const u8 *macaddr) +{ + struct wmi_vdev_install_key_arg arg = { + .vdev_id = arvif->vdev_id, + .key_idx = key->keyidx, + .key_len = key->keylen, + .key_data = key->key, + .macaddr = macaddr, + }; + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + arg.key_flags = WMI_KEY_PAIRWISE; + else + arg.key_flags = WMI_KEY_GROUP; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + arg.key_cipher = WMI_CIPHER_AES_CCM; + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + break; + case WLAN_CIPHER_SUITE_TKIP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + arg.key_cipher = WMI_CIPHER_TKIP; + arg.key_txmic_len = 8; + arg.key_rxmic_len = 8; + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + arg.key_cipher = WMI_CIPHER_WEP; + /* AP/IBSS mode requires self-key to be groupwise + * Otherwise pairwise key must be set */ + if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN)) + arg.key_flags = WMI_KEY_PAIRWISE; + break; + default: + ath10k_warn("cipher %d is not supported\n", key->cipher); + return -EOPNOTSUPP; + } + + if (cmd == DISABLE_KEY) { + arg.key_cipher = WMI_CIPHER_NONE; + arg.key_data = NULL; + } + + return ath10k_wmi_vdev_install_key(arvif->ar, &arg); +} + +static int ath10k_install_key(struct ath10k_vif *arvif, + struct ieee80211_key_conf *key, + enum set_key_cmd cmd, + const u8 *macaddr) +{ + struct ath10k *ar = arvif->ar; + int ret; + + INIT_COMPLETION(ar->install_key_done); + + ret = ath10k_send_key(arvif, key, cmd, macaddr); + if (ret) + return ret; + + ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ); + if (ret == 0) + return -ETIMEDOUT; + + return 0; +} + +static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, + const u8 *addr) +{ + struct ath10k *ar = arvif->ar; + struct ath10k_peer *peer; + int ret; + int i; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find(ar, arvif->vdev_id, addr); + spin_unlock_bh(&ar->data_lock); + + if (!peer) + return -ENOENT; + + for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { + if (arvif->wep_keys[i] == NULL) + continue; + + ret = ath10k_install_key(arvif, 
arvif->wep_keys[i], SET_KEY, + addr); + if (ret) + return ret; + + peer->keys[i] = arvif->wep_keys[i]; + } + + return 0; +} + +static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, + const u8 *addr) +{ + struct ath10k *ar = arvif->ar; + struct ath10k_peer *peer; + int first_errno = 0; + int ret; + int i; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find(ar, arvif->vdev_id, addr); + spin_unlock_bh(&ar->data_lock); + + if (!peer) + return -ENOENT; + + for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { + if (peer->keys[i] == NULL) + continue; + + ret = ath10k_install_key(arvif, peer->keys[i], + DISABLE_KEY, addr); + if (ret && first_errno == 0) + first_errno = ret; + + if (ret) + ath10k_warn("could not remove peer wep key %d (%d)\n", + i, ret); + + peer->keys[i] = NULL; + } + + return first_errno; +} + +static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, + struct ieee80211_key_conf *key) +{ + struct ath10k *ar = arvif->ar; + struct ath10k_peer *peer; + u8 addr[ETH_ALEN]; + int first_errno = 0; + int ret; + int i; + + lockdep_assert_held(&ar->conf_mutex); + + for (;;) { + /* since ath10k_install_key we can't hold data_lock all the + * time, so we try to remove the keys incrementally */ + spin_lock_bh(&ar->data_lock); + i = 0; + list_for_each_entry(peer, &ar->peers, list) { + for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { + if (peer->keys[i] == key) { + memcpy(addr, peer->addr, ETH_ALEN); + peer->keys[i] = NULL; + break; + } + } + + if (i < ARRAY_SIZE(peer->keys)) + break; + } + spin_unlock_bh(&ar->data_lock); + + if (i == ARRAY_SIZE(peer->keys)) + break; + + ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr); + if (ret && first_errno == 0) + first_errno = ret; + + if (ret) + ath10k_warn("could not remove key for %pM\n", addr); + } + + return first_errno; +} + + +/*********************/ +/* General utilities */ +/*********************/ + +static inline enum wmi_phy_mode +chan_to_phymode(const struct cfg80211_chan_def *chandef) +{ + enum wmi_phy_mode phymode = MODE_UNKNOWN; + + switch (chandef->chan->band) { + case IEEE80211_BAND_2GHZ: + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20_NOHT: + phymode = MODE_11G; + break; + case NL80211_CHAN_WIDTH_20: + phymode = MODE_11NG_HT20; + break; + case NL80211_CHAN_WIDTH_40: + phymode = MODE_11NG_HT40; + break; + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + phymode = MODE_UNKNOWN; + break; + } + break; + case IEEE80211_BAND_5GHZ: + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20_NOHT: + phymode = MODE_11A; + break; + case NL80211_CHAN_WIDTH_20: + phymode = MODE_11NA_HT20; + break; + case NL80211_CHAN_WIDTH_40: + phymode = MODE_11NA_HT40; + break; + case NL80211_CHAN_WIDTH_80: + phymode = MODE_11AC_VHT80; + break; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + phymode = MODE_UNKNOWN; + break; + } + break; + default: + break; + } + + WARN_ON(phymode == MODE_UNKNOWN); + return phymode; +} + +static u8 ath10k_parse_mpdudensity(u8 mpdudensity) +{ +/* + * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": + * 0 for no restriction + * 1 for 1/4 us + * 2 for 1/2 us + * 3 for 1 us + * 4 for 2 us + * 5 for 4 us + * 6 for 8 us + * 7 for 16 us + */ + switch (mpdudensity) { + case 0: + return 0; + case 1: + case 2: + case 3: + /* Our lower layer calculations limit our precision to + 1 microsecond */ + return 1; + case 4: + return 2; + case 5: + return 4; + case 6: + return 8; + case 7: + return 16; + default: + 
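/* out-of-range density values: fall back to 0, i.e. no restriction */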
return 0; + } +} + +static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ret = ath10k_wmi_peer_create(ar, vdev_id, addr); + if (ret) + return ret; + + ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); + if (ret) + return ret; + + return 0; +} + +static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); + if (ret) + return ret; + + ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); + if (ret) + return ret; + + return 0; +} + +static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) +{ + struct ath10k_peer *peer, *tmp; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + list_for_each_entry_safe(peer, tmp, &ar->peers, list) { + if (peer->vdev_id != vdev_id) + continue; + + ath10k_warn("removing stale peer %pM from vdev_id %d\n", + peer->addr, vdev_id); + + list_del(&peer->list); + kfree(peer); + } + spin_unlock_bh(&ar->data_lock); +} + +/************************/ +/* Interface management */ +/************************/ + +static inline int ath10k_vdev_setup_sync(struct ath10k *ar) +{ + int ret; + + ret = wait_for_completion_timeout(&ar->vdev_setup_done, + ATH10K_VDEV_SETUP_TIMEOUT_HZ); + if (ret == 0) + return -ETIMEDOUT; + + return 0; +} + +static int ath10k_vdev_start(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct ieee80211_conf *conf = &ar->hw->conf; + struct ieee80211_channel *channel = conf->chandef.chan; + struct wmi_vdev_start_request_arg arg = {}; + int ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + INIT_COMPLETION(ar->vdev_setup_done); + + arg.vdev_id = arvif->vdev_id; + arg.dtim_period = arvif->dtim_period; + arg.bcn_intval = arvif->beacon_interval; + + arg.channel.freq = channel->center_freq; + + arg.channel.band_center_freq1 = conf->chandef.center_freq1; + + arg.channel.mode = chan_to_phymode(&conf->chandef); + + arg.channel.min_power = channel->max_power * 3; + arg.channel.max_power = channel->max_power * 4; + arg.channel.max_reg_power = channel->max_reg_power * 4; + arg.channel.max_antenna_gain = channel->max_antenna_gain; + + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { + arg.ssid = arvif->u.ap.ssid; + arg.ssid_len = arvif->u.ap.ssid_len; + arg.hidden_ssid = arvif->u.ap.hidden_ssid; + } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { + arg.ssid = arvif->vif->bss_conf.ssid; + arg.ssid_len = arvif->vif->bss_conf.ssid_len; + } + + ret = ath10k_wmi_vdev_start(ar, &arg); + if (ret) { + ath10k_warn("WMI vdev start failed: ret %d\n", ret); + return ret; + } + + ret = ath10k_vdev_setup_sync(ar); + if (ret) { + ath10k_warn("vdev setup failed %d\n", ret); + return ret; + } + + return ret; +} + +static int ath10k_vdev_stop(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + INIT_COMPLETION(ar->vdev_setup_done); + + ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); + if (ret) { + ath10k_warn("WMI vdev stop failed: ret %d\n", ret); + return ret; + } + + ret = ath10k_vdev_setup_sync(ar); + if (ret) { + ath10k_warn("vdev setup failed %d\n", ret); + return ret; + } + + return ret; +} + +static int ath10k_monitor_start(struct ath10k *ar, int vdev_id) +{ + struct ieee80211_channel *channel = ar->hw->conf.chandef.chan; + struct wmi_vdev_start_request_arg arg = {}; + enum nl80211_channel_type type; + int ret = 0; + + 
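/* Bring the monitor vdev up on the current channel: issue a WMI vdev
 * start, wait for the firmware response via ath10k_vdev_setup_sync(),
 * then mark the vdev up.
 */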
lockdep_assert_held(&ar->conf_mutex); + + type = cfg80211_get_chandef_type(&ar->hw->conf.chandef); + + arg.vdev_id = vdev_id; + arg.channel.freq = channel->center_freq; + arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1; + + /* TODO: set this up dynamically; what if we + don't have any vifs? */ + arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef); + + arg.channel.min_power = channel->max_power * 3; + arg.channel.max_power = channel->max_power * 4; + arg.channel.max_reg_power = channel->max_reg_power * 4; + arg.channel.max_antenna_gain = channel->max_antenna_gain; + + ret = ath10k_wmi_vdev_start(ar, &arg); + if (ret) { + ath10k_warn("Monitor vdev start failed: ret %d\n", ret); + return ret; + } + + ret = ath10k_vdev_setup_sync(ar); + if (ret) { + ath10k_warn("Monitor vdev setup failed %d\n", ret); + return ret; + } + + ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); + if (ret) { + ath10k_warn("Monitor vdev up failed: %d\n", ret); + goto vdev_stop; + } + + ar->monitor_vdev_id = vdev_id; + ar->monitor_enabled = true; + + return 0; + +vdev_stop: + ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); + if (ret) + ath10k_warn("Monitor vdev stop failed: %d\n", ret); + + return ret; +} + +static int ath10k_monitor_stop(struct ath10k *ar) +{ + int ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + /* For some reason, calling ath10k_wmi_vdev_down() here often + * causes the subsequent ath10k_wmi_vdev_stop() to fail, after + * which the monitor vdev cannot be run again without a driver + * reload. Since we have not seen such problems when skipping it, + * we don't call ath10k_wmi_vdev_down() here. + */ + + ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); + if (ret) + ath10k_warn("Monitor vdev stop failed: %d\n", ret); + + ret = ath10k_vdev_setup_sync(ar); + if (ret) + ath10k_warn("Monitor_down sync failed: %d\n", ret); + + ar->monitor_enabled = false; + return ret; +} + +static int ath10k_monitor_create(struct ath10k *ar) +{ + int bit, ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + if (ar->monitor_present) { + ath10k_warn("Monitor mode already enabled\n"); + return 0; + } + + bit = ffs(ar->free_vdev_map); + if (bit == 0) { + ath10k_warn("No free VDEV slots\n"); + return -ENOMEM; + } + + ar->monitor_vdev_id = bit - 1; + ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id); + + ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, + WMI_VDEV_TYPE_MONITOR, + 0, ar->mac_addr); + if (ret) { + ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret); + goto vdev_fail; + } + + ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n", + ar->monitor_vdev_id); + + ar->monitor_present = true; + return 0; + +vdev_fail: + /* + * Restore the ID to the global map. 
+ */ + ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); + return ret; +} + +static int ath10k_monitor_destroy(struct ath10k *ar) +{ + int ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + if (!ar->monitor_present) + return 0; + + ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); + if (ret) { + ath10k_warn("WMI vdev monitor delete failed: %d\n", ret); + return ret; + } + + ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); + ar->monitor_present = false; + + ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n", + ar->monitor_vdev_id); + return ret; +} + +static void ath10k_control_beaconing(struct ath10k_vif *arvif, + struct ieee80211_bss_conf *info) +{ + int ret = 0; + + if (!info->enable_beacon) { + ath10k_vdev_stop(arvif); + return; + } + + arvif->tx_seq_no = 0x1000; + + ret = ath10k_vdev_start(arvif); + if (ret) + return; + + ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid); + if (ret) { + ath10k_warn("Failed to bring up VDEV: %d\n", + arvif->vdev_id); + return; + } + ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id); +} + +static void ath10k_control_ibss(struct ath10k_vif *arvif, + struct ieee80211_bss_conf *info, + const u8 self_peer[ETH_ALEN]) +{ + int ret = 0; + + if (!info->ibss_joined) { + ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer); + if (ret) + ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n", + self_peer, arvif->vdev_id, ret); + + if (is_zero_ether_addr(arvif->u.ibss.bssid)) + return; + + ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, + arvif->u.ibss.bssid); + if (ret) { + ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n", + arvif->u.ibss.bssid, arvif->vdev_id, ret); + return; + } + + memset(arvif->u.ibss.bssid, 0, ETH_ALEN); + + return; + } + + ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer); + if (ret) { + ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n", + self_peer, arvif->vdev_id, ret); + return; + } + + ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, + WMI_VDEV_PARAM_ATIM_WINDOW, + ATH10K_DEFAULT_ATIM); + if (ret) + ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n", + arvif->vdev_id, ret); +} + +/* + * Review this when mac80211 gains per-interface powersave support. 
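 * (PS is a global ieee80211_conf flag here, so it is applied to every
 * STA vif via the interface iterator below.)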
+ */ +static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct ath10k_generic_iter *ar_iter = data; + struct ieee80211_conf *conf = &ar_iter->ar->hw->conf; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + enum wmi_sta_powersave_param param; + enum wmi_sta_ps_mode psmode; + int ret; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (conf->flags & IEEE80211_CONF_PS) { + psmode = WMI_STA_PS_MODE_ENABLED; + param = WMI_STA_PS_PARAM_INACTIVITY_TIME; + + ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar, + arvif->vdev_id, + param, + conf->dynamic_ps_timeout); + if (ret) { + ath10k_warn("Failed to set inactivity time for VDEV: %d\n", + arvif->vdev_id); + return; + } + + ar_iter->ret = ret; + } else { + psmode = WMI_STA_PS_MODE_DISABLED; + } + + ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id, + psmode); + if (ar_iter->ret) + ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n", + psmode, arvif->vdev_id); + else + ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n", + psmode, arvif->vdev_id); +} + +/**********************/ +/* Station management */ +/**********************/ + +static void ath10k_peer_assoc_h_basic(struct ath10k *ar, + struct ath10k_vif *arvif, + struct ieee80211_sta *sta, + struct ieee80211_bss_conf *bss_conf, + struct wmi_peer_assoc_complete_arg *arg) +{ + memcpy(arg->addr, sta->addr, ETH_ALEN); + arg->vdev_id = arvif->vdev_id; + arg->peer_aid = sta->aid; + arg->peer_flags |= WMI_PEER_AUTH; + + if (arvif->vdev_type == WMI_VDEV_TYPE_STA) + /* + * The FW seems to have problems with Power Save in STA + * mode when this parameter is set high (e.g. 5). We often + * see that the FW doesn't send a NULL frame (with clean PS + * flags) even when the beacons indicate buffered frames. + * Sometimes we have to wait more than 10 seconds before the + * FW wakes up, and more than 50% of pings from the AP to + * our device simply fail. + * + * Setting this FW parameter to 1 makes the FW check every + * beacon and wake up immediately once buffered data is + * detected. + */ + arg->peer_listen_intval = 1; + else + arg->peer_listen_intval = ar->hw->conf.listen_interval; + + arg->peer_num_spatial_streams = 1; + + /* + * The assoc capabilities are available only in managed mode. + */ + if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf) + arg->peer_caps = bss_conf->assoc_capability; +} + +static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, + struct ath10k_vif *arvif, + struct wmi_peer_assoc_complete_arg *arg) +{ + struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_bss_conf *info = &vif->bss_conf; + struct cfg80211_bss *bss; + const u8 *rsnie = NULL; + const u8 *wpaie = NULL; + + bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan, + info->bssid, NULL, 0, 0, 0); + if (bss) { + const struct cfg80211_bss_ies *ies; + + rcu_read_lock(); + rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); + + ies = rcu_dereference(bss->ies); + + wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, + WLAN_OUI_TYPE_MICROSOFT_WPA, + ies->data, + ies->len); + rcu_read_unlock(); + cfg80211_put_bss(ar->hw->wiphy, bss); + } + + /* FIXME: is keying this off RSN IE/WPA IE presence a correct idea? 
*/ + if (rsnie || wpaie) { + ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); + arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; + } + + if (wpaie) { + ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); + arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; + } +} + +static void ath10k_peer_assoc_h_rates(struct ath10k *ar, + struct ieee80211_sta *sta, + struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; + const struct ieee80211_supported_band *sband; + const struct ieee80211_rate *rates; + u32 ratemask; + int i; + + sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band]; + ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band]; + rates = sband->bitrates; + + rateset->num_rates = 0; + + for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { + if (!(ratemask & 1)) + continue; + + rateset->rates[rateset->num_rates] = rates->hw_value; + rateset->num_rates++; + } +} + +static void ath10k_peer_assoc_h_ht(struct ath10k *ar, + struct ieee80211_sta *sta, + struct wmi_peer_assoc_complete_arg *arg) +{ + const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + int smps; + int i, n; + + if (!ht_cap->ht_supported) + return; + + arg->peer_flags |= WMI_PEER_HT; + arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + + ht_cap->ampdu_factor)) - 1; + + arg->peer_mpdu_density = + ath10k_parse_mpdudensity(ht_cap->ampdu_density); + + arg->peer_ht_caps = ht_cap->cap; + arg->peer_rate_caps |= WMI_RC_HT_FLAG; + + if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) + arg->peer_flags |= WMI_PEER_LDPC; + + if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { + arg->peer_flags |= WMI_PEER_40MHZ; + arg->peer_rate_caps |= WMI_RC_CW40_FLAG; + } + + if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) + arg->peer_rate_caps |= WMI_RC_SGI_FLAG; + + if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) + arg->peer_rate_caps |= WMI_RC_SGI_FLAG; + + if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { + arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; + arg->peer_flags |= WMI_PEER_STBC; + } + + if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { + u32 stbc; + stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; + stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; + stbc = stbc << WMI_RC_RX_STBC_FLAG_S; + arg->peer_rate_caps |= stbc; + arg->peer_flags |= WMI_PEER_STBC; + } + + smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; + smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + + if (smps == WLAN_HT_CAP_SM_PS_STATIC) { + arg->peer_flags |= WMI_PEER_SPATIAL_MUX; + arg->peer_flags |= WMI_PEER_STATIC_MIMOPS; + } else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) { + arg->peer_flags |= WMI_PEER_SPATIAL_MUX; + arg->peer_flags |= WMI_PEER_DYN_MIMOPS; + } + + if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) + arg->peer_rate_caps |= WMI_RC_TS_FLAG; + else if (ht_cap->mcs.rx_mask[1]) + arg->peer_rate_caps |= WMI_RC_DS_FLAG; + + for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++) + if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8)) + arg->peer_ht_rates.rates[n++] = i; + + arg->peer_ht_rates.num_rates = n; + arg->peer_num_spatial_streams = max((n+7) / 8, 1); + + ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n", + arg->peer_ht_rates.num_rates, + arg->peer_num_spatial_streams); +} + +static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar, + struct ath10k_vif *arvif, + struct ieee80211_sta *sta, + struct ieee80211_bss_conf *bss_conf, + struct wmi_peer_assoc_complete_arg *arg) +{ + u32 uapsd = 0; + u32 max_sp = 0; + + if (sta->wme) + arg->peer_flags |= WMI_PEER_QOS; + + if (sta->wme && sta->uapsd_queues) { + ath10k_dbg(ATH10K_DBG_MAC, 
"uapsd_queues: 0x%X, max_sp: %d\n", + sta->uapsd_queues, sta->max_sp); + + arg->peer_flags |= WMI_PEER_APSD; + arg->peer_flags |= WMI_RC_UAPSD_FLAG; + + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | + WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) + uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | + WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | + WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) + uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | + WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; + + + if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) + max_sp = sta->max_sp; + + ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, + sta->addr, + WMI_AP_PS_PEER_PARAM_UAPSD, + uapsd); + + ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, + sta->addr, + WMI_AP_PS_PEER_PARAM_MAX_SP, + max_sp); + + /* TODO setup this based on STA listen interval and + beacon interval. Currently we don't know + sta->listen_interval - mac80211 patch required. + Currently use 10 seconds */ + ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, + sta->addr, + WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, + 10); + } +} + +static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar, + struct ath10k_vif *arvif, + struct ieee80211_sta *sta, + struct ieee80211_bss_conf *bss_conf, + struct wmi_peer_assoc_complete_arg *arg) +{ + if (bss_conf->qos) + arg->peer_flags |= WMI_PEER_QOS; +} + +static void ath10k_peer_assoc_h_vht(struct ath10k *ar, + struct ieee80211_sta *sta, + struct wmi_peer_assoc_complete_arg *arg) +{ + const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + + if (!vht_cap->vht_supported) + return; + + arg->peer_flags |= WMI_PEER_VHT; + + arg->peer_vht_caps = vht_cap->cap; + + if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + arg->peer_flags |= WMI_PEER_80MHZ; + + arg->peer_vht_rates.rx_max_rate = + __le16_to_cpu(vht_cap->vht_mcs.rx_highest); + arg->peer_vht_rates.rx_mcs_set = + __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); + arg->peer_vht_rates.tx_max_rate = + __le16_to_cpu(vht_cap->vht_mcs.tx_highest); + arg->peer_vht_rates.tx_mcs_set = + __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); + + ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n"); +} + +static void ath10k_peer_assoc_h_qos(struct ath10k *ar, + struct ath10k_vif *arvif, + struct ieee80211_sta *sta, + struct ieee80211_bss_conf *bss_conf, + struct wmi_peer_assoc_complete_arg *arg) +{ + switch (arvif->vdev_type) { + case WMI_VDEV_TYPE_AP: + ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg); + break; + case WMI_VDEV_TYPE_STA: + ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg); + break; + default: + break; + } +} + +static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, + struct ath10k_vif *arvif, + struct ieee80211_sta *sta, + struct wmi_peer_assoc_complete_arg *arg) +{ + enum wmi_phy_mode phymode = MODE_UNKNOWN; + + /* FIXME: add VHT */ + + switch (ar->hw->conf.chandef.chan->band) { + case IEEE80211_BAND_2GHZ: + if (sta->ht_cap.ht_supported) { + if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + phymode = MODE_11NG_HT40; + else + phymode = MODE_11NG_HT20; + } else { + phymode = MODE_11G; + } + + break; + case IEEE80211_BAND_5GHZ: + if (sta->ht_cap.ht_supported) { + if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + phymode = MODE_11NA_HT40; + else + phymode = MODE_11NA_HT20; + } else { + phymode = MODE_11A; + } + + break; + default: + break; + } + + 
arg->peer_phymode = phymode; + WARN_ON(phymode == MODE_UNKNOWN); +} + +static int ath10k_peer_assoc(struct ath10k *ar, + struct ath10k_vif *arvif, + struct ieee80211_sta *sta, + struct ieee80211_bss_conf *bss_conf) +{ + struct wmi_peer_assoc_complete_arg arg; + + memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg)); + + ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg); + ath10k_peer_assoc_h_crypto(ar, arvif, &arg); + ath10k_peer_assoc_h_rates(ar, sta, &arg); + ath10k_peer_assoc_h_ht(ar, sta, &arg); + ath10k_peer_assoc_h_vht(ar, sta, &arg); + ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg); + ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg); + + return ath10k_wmi_peer_assoc(ar, &arg); +} + +/* can be called only in mac80211 callbacks due to `key_count` usage */ +static void ath10k_bss_assoc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ieee80211_sta *ap_sta; + int ret; + + rcu_read_lock(); + + ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (!ap_sta) { + ath10k_warn("Failed to find station entry for %pM\n", + bss_conf->bssid); + rcu_read_unlock(); + return; + } + + ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf); + if (ret) { + ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid); + rcu_read_unlock(); + return; + } + + rcu_read_unlock(); + + ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid, + bss_conf->bssid); + if (ret) + ath10k_warn("VDEV: %d up failed: ret %d\n", + arvif->vdev_id, ret); + else + ath10k_dbg(ATH10K_DBG_MAC, + "VDEV: %d associated, BSSID: %pM, AID: %d\n", + arvif->vdev_id, bss_conf->bssid, bss_conf->aid); +} + +/* + * FIXME: flush TIDs + */ +static void ath10k_bss_disassoc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + int ret; + + /* + * For some reason, calling VDEV-DOWN before VDEV-STOP + * makes the FW send frames via HTT after disassociation. + * It is unclear why this happens, even though VDEV-DOWN is + * supposed to be analogous to link down, so just stop the VDEV. + */ + ret = ath10k_vdev_stop(arvif); + if (!ret) + ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n", + arvif->vdev_id); + + /* + * If we don't call VDEV-DOWN after VDEV-STOP the FW will remain active + * and report beacons from the previously associated network through + * HTT. This in turn would spam mac80211 with WARN_ONs if we bring down + * all interfaces, as it expects there is no rx when no interface is + * running. 
+ */ + ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) + ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n", + arvif->vdev_id, ret); + + ath10k_wmi_flush_tx(ar); + + arvif->def_wep_key_index = 0; +} + +static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, + struct ieee80211_sta *sta) +{ + int ret = 0; + + ret = ath10k_peer_assoc(ar, arvif, sta, NULL); + if (ret) { + ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr); + return ret; + } + + ret = ath10k_install_peer_wep_keys(arvif, sta->addr); + if (ret) { + ath10k_warn("could not install peer wep keys (%d)\n", ret); + return ret; + } + + return ret; +} + +static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, + struct ieee80211_sta *sta) +{ + int ret = 0; + + ret = ath10k_clear_peer_keys(arvif, sta->addr); + if (ret) { + ath10k_warn("could not clear all peer wep keys (%d)\n", ret); + return ret; + } + + return ret; +} + +/**************/ +/* Regulatory */ +/**************/ + +static int ath10k_update_channel_list(struct ath10k *ar) +{ + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_supported_band **bands; + enum ieee80211_band band; + struct ieee80211_channel *channel; + struct wmi_scan_chan_list_arg arg = {0}; + struct wmi_channel_arg *ch; + bool passive; + int len; + int ret; + int i; + + bands = hw->wiphy->bands; + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (!bands[band]) + continue; + + for (i = 0; i < bands[band]->n_channels; i++) { + if (bands[band]->channels[i].flags & + IEEE80211_CHAN_DISABLED) + continue; + + arg.n_channels++; + } + } + + len = sizeof(struct wmi_channel_arg) * arg.n_channels; + arg.channels = kzalloc(len, GFP_KERNEL); + if (!arg.channels) + return -ENOMEM; + + ch = arg.channels; + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (!bands[band]) + continue; + + for (i = 0; i < bands[band]->n_channels; i++) { + channel = &bands[band]->channels[i]; + + if (channel->flags & IEEE80211_CHAN_DISABLED) + continue; + + ch->allow_ht = true; + + /* FIXME: when should we really allow VHT? */ + ch->allow_vht = true; + + ch->allow_ibss = + !(channel->flags & IEEE80211_CHAN_NO_IBSS); + + ch->ht40plus = + !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); + + passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN; + ch->passive = passive; + + ch->freq = channel->center_freq; + ch->min_power = channel->max_power * 3; + ch->max_power = channel->max_power * 4; + ch->max_reg_power = channel->max_reg_power * 4; + ch->max_antenna_gain = channel->max_antenna_gain; + ch->reg_class_id = 0; /* FIXME */ + + /* FIXME: why use only legacy modes, why not any + * HT/VHT modes? Would that even make any + * difference? 
*/ + if (channel->band == IEEE80211_BAND_2GHZ) + ch->mode = MODE_11G; + else + ch->mode = MODE_11A; + + if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) + continue; + + ath10k_dbg(ATH10K_DBG_WMI, + "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", + __func__, ch - arg.channels, arg.n_channels, + ch->freq, ch->max_power, ch->max_reg_power, + ch->max_antenna_gain, ch->mode); + + ch++; + } + } + + ret = ath10k_wmi_scan_chan_list(ar, &arg); + kfree(arg.channels); + + return ret; +} + +static void ath10k_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct reg_dmn_pair_mapping *regpair; + struct ath10k *ar = hw->priv; + int ret; + + ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); + + ret = ath10k_update_channel_list(ar); + if (ret) + ath10k_warn("could not update channel list (%d)\n", ret); + + regpair = ar->ath_common.regulatory.regpair; + /* Target allows setting up per-band regdomain but ath_common provides + * a combined one only */ + ret = ath10k_wmi_pdev_set_regdomain(ar, + regpair->regDmnEnum, + regpair->regDmnEnum, /* 2ghz */ + regpair->regDmnEnum, /* 5ghz */ + regpair->reg_2ghz_ctl, + regpair->reg_5ghz_ctl); + if (ret) + ath10k_warn("could not set pdev regdomain (%d)\n", ret); +} + +/***************/ +/* TX handlers */ +/***************/ + +/* + * Frames sent to the FW have to be in "Native Wifi" format. + * Strip the QoS field from the 802.11 header. + */ +static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + u8 *qos_ctl; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return; + + qos_ctl = ieee80211_get_qos_ctl(hdr); + memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN, + skb->len - ieee80211_hdrlen(hdr->frame_control)); + skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN); +} + +static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k *ar = arvif->ar; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_key_conf *key = info->control.hw_key; + int ret; + + /* TODO AP mode should be implemented */ + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (!ieee80211_has_protected(hdr->frame_control)) + return; + + if (!key) + return; + + if (key->cipher != WLAN_CIPHER_SUITE_WEP40 && + key->cipher != WLAN_CIPHER_SUITE_WEP104) + return; + + if (key->keyidx == arvif->def_wep_key_index) + return; + + ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx); + + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + WMI_VDEV_PARAM_DEF_KEYID, + key->keyidx); + if (ret) { + ath10k_warn("could not update wep keyidx (%d)\n", ret); + return; + } + + arvif->def_wep_key_index = key->keyidx; +} + +static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + + /* This is case only for P2P_GO */ + if (arvif->vdev_type != WMI_VDEV_TYPE_AP || + arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) + return; + + if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { + 
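/* Append the GO's current NoA attribute to the probe response, under
 * data_lock so noa_data cannot change beneath us.
 */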
spin_lock_bh(&ar->data_lock); + if (arvif->u.ap.noa_data) + if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, + GFP_ATOMIC)) + memcpy(skb_put(skb, arvif->u.ap.noa_len), + arvif->u.ap.noa_data, + arvif->u.ap.noa_len); + spin_unlock_bh(&ar->data_lock); + } +} + +static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int ret; + + if (ieee80211_is_mgmt(hdr->frame_control)) + ret = ath10k_htt_mgmt_tx(ar->htt, skb); + else if (ieee80211_is_nullfunc(hdr->frame_control)) + /* FW does not report tx status properly for NullFunc frames + * unless they are sent through mgmt tx path. mac80211 sends + * those frames when it detects link/beacon loss and depends on + * the tx status to be correct. */ + ret = ath10k_htt_mgmt_tx(ar->htt, skb); + else + ret = ath10k_htt_tx(ar->htt, skb); + + if (ret) { + ath10k_warn("tx failed (%d). dropping packet.\n", ret); + ieee80211_free_txskb(ar->hw, skb); + } +} + +void ath10k_offchan_tx_purge(struct ath10k *ar) +{ + struct sk_buff *skb; + + for (;;) { + skb = skb_dequeue(&ar->offchan_tx_queue); + if (!skb) + break; + + ieee80211_free_txskb(ar->hw, skb); + } +} + +void ath10k_offchan_tx_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); + struct ath10k_peer *peer; + struct ieee80211_hdr *hdr; + struct sk_buff *skb; + const u8 *peer_addr; + int vdev_id; + int ret; + + /* FW requirement: We must create a peer before FW will send out + * an offchannel frame. Otherwise the frame will be stuck and + * never transmitted. We delete the peer upon tx completion. + * It is unlikely that a peer for offchannel tx will already be + * present. However it may be in some rare cases so account for that. + * Otherwise we might remove a legitimate peer and break stuff. */ + + for (;;) { + skb = skb_dequeue(&ar->offchan_tx_queue); + if (!skb) + break; + + mutex_lock(&ar->conf_mutex); + + ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n", + skb); + + hdr = (struct ieee80211_hdr *)skb->data; + peer_addr = ieee80211_get_DA(hdr); + vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id; + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find(ar, vdev_id, peer_addr); + spin_unlock_bh(&ar->data_lock); + + if (peer) + ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", + peer_addr, vdev_id); + + if (!peer) { + ret = ath10k_peer_create(ar, vdev_id, peer_addr); + if (ret) + ath10k_warn("peer %pM on vdev %d not created (%d)\n", + peer_addr, vdev_id, ret); + } + + spin_lock_bh(&ar->data_lock); + INIT_COMPLETION(ar->offchan_tx_completed); + ar->offchan_tx_skb = skb; + spin_unlock_bh(&ar->data_lock); + + ath10k_tx_htt(ar, skb); + + ret = wait_for_completion_timeout(&ar->offchan_tx_completed, + 3 * HZ); + if (ret <= 0) + ath10k_warn("timed out waiting for offchannel skb %p\n", + skb); + + if (!peer) { + ret = ath10k_peer_delete(ar, vdev_id, peer_addr); + if (ret) + ath10k_warn("peer %pM on vdev %d not deleted (%d)\n", + peer_addr, vdev_id, ret); + } + + mutex_unlock(&ar->conf_mutex); + } +} + +/************/ +/* Scanning */ +/************/ + +/* + * This gets called if we dont get a heart-beat during scan. 
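 * (In this patch that heart-beat is the ar->scan.timeout timer armed
 * in ath10k_start_scan().)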
+ * This may indicate the FW has hung and we need to abort the + * scan manually to prevent cancel_hw_scan() from deadlocking + */ +void ath10k_reset_scan(unsigned long ptr) +{ + struct ath10k *ar = (struct ath10k *)ptr; + + spin_lock_bh(&ar->data_lock); + if (!ar->scan.in_progress) { + spin_unlock_bh(&ar->data_lock); + return; + } + + ath10k_warn("scan timeout. resetting. fw issue?\n"); + + if (ar->scan.is_roc) + ieee80211_remain_on_channel_expired(ar->hw); + else + ieee80211_scan_completed(ar->hw, 1 /* aborted */); + + ar->scan.in_progress = false; + complete_all(&ar->scan.completed); + spin_unlock_bh(&ar->data_lock); +} + +static int ath10k_abort_scan(struct ath10k *ar) +{ + struct wmi_stop_scan_arg arg = { + .req_id = 1, /* FIXME */ + .req_type = WMI_SCAN_STOP_ONE, + .u.scan_id = ATH10K_SCAN_ID, + }; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + del_timer_sync(&ar->scan.timeout); + + spin_lock_bh(&ar->data_lock); + if (!ar->scan.in_progress) { + spin_unlock_bh(&ar->data_lock); + return 0; + } + + ar->scan.aborting = true; + spin_unlock_bh(&ar->data_lock); + + ret = ath10k_wmi_stop_scan(ar, &arg); + if (ret) { + ath10k_warn("could not submit wmi stop scan (%d)\n", ret); + return -EIO; + } + + ath10k_wmi_flush_tx(ar); + + ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); + if (ret == 0) + ath10k_warn("timed out while waiting for scan to stop\n"); + + /* scan completion may be done right after we timeout here, so let's + * check the in_progress and tell mac80211 scan is completed. if we + * don't do that and FW fails to send us scan completion indication + * then userspace won't be able to scan anymore */ + ret = 0; + + spin_lock_bh(&ar->data_lock); + if (ar->scan.in_progress) { + ath10k_warn("could not stop scan. its still in progress\n"); + ar->scan.in_progress = false; + ath10k_offchan_tx_purge(ar); + ret = -ETIMEDOUT; + } + spin_unlock_bh(&ar->data_lock); + + return ret; +} + +static int ath10k_start_scan(struct ath10k *ar, + const struct wmi_start_scan_arg *arg) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ret = ath10k_wmi_start_scan(ar, arg); + if (ret) + return ret; + + /* make sure we submit the command so the completion + * timeout makes sense */ + ath10k_wmi_flush_tx(ar); + + ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); + if (ret == 0) { + ath10k_abort_scan(ar); + return ret; + } + + /* the scan can complete earlier, before we even + * start the timer. in that case the timer handler + * checks ar->scan.in_progress and bails out if its + * false. Add a 200ms margin to account event/command + * processing. 
*/ + mod_timer(&ar->scan.timeout, jiffies + + msecs_to_jiffies(arg->max_scan_time+200)); + return 0; +} + +/**********************/ +/* mac80211 callbacks */ +/**********************/ + +static void ath10k_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = NULL; + u32 vdev_id = 0; + u8 tid; + + if (info->control.vif) { + arvif = ath10k_vif_to_arvif(info->control.vif); + vdev_id = arvif->vdev_id; + } else if (ar->monitor_enabled) { + vdev_id = ar->monitor_vdev_id; + } + + /* We should disable CCK rates due to P2P */ + if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) + ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); + + /* we must calculate the tid before we apply the qos workaround, + * as we'd lose the qos control field otherwise */ + tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; + if (ieee80211_is_data_qos(hdr->frame_control) && + is_unicast_ether_addr(ieee80211_get_DA(hdr))) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + } + + ath10k_tx_h_qos_workaround(hw, control, skb); + ath10k_tx_h_update_wep_key(skb); + ath10k_tx_h_add_p2p_noa_ie(ar, skb); + ath10k_tx_h_seq_no(skb); + + memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb))); + ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id; + ATH10K_SKB_CB(skb)->htt.tid = tid; + + if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + spin_lock_bh(&ar->data_lock); + ATH10K_SKB_CB(skb)->htt.is_offchan = true; + ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id; + spin_unlock_bh(&ar->data_lock); + + ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb); + + skb_queue_tail(&ar->offchan_tx_queue, skb); + ieee80211_queue_work(hw, &ar->offchan_tx_work); + return; + } + + ath10k_tx_htt(ar, skb); +} + +/* + * Initialize various parameters with default values. 
+ */ +static int ath10k_start(struct ieee80211_hw *hw) +{ + struct ath10k *ar = hw->priv; + int ret; + + ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1); + if (ret) + ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", + ret); + + ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0); + if (ret) + ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", + ret); + + return 0; +} + +static void ath10k_stop(struct ieee80211_hw *hw) +{ + struct ath10k *ar = hw->priv; + + /* avoid leaks in case FW never confirms scan for offchannel */ + cancel_work_sync(&ar->offchan_tx_work); + ath10k_offchan_tx_purge(ar); +} + +static int ath10k_config(struct ieee80211_hw *hw, u32 changed) +{ + struct ath10k_generic_iter ar_iter; + struct ath10k *ar = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + int ret = 0; + u32 flags; + + mutex_lock(&ar->conf_mutex); + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n", + conf->chandef.chan->center_freq); + spin_lock_bh(&ar->data_lock); + ar->rx_channel = conf->chandef.chan; + spin_unlock_bh(&ar->data_lock); + } + + if (changed & IEEE80211_CONF_CHANGE_PS) { + memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); + ar_iter.ar = ar; + flags = IEEE80211_IFACE_ITER_RESUME_ALL; + + ieee80211_iterate_active_interfaces_atomic(hw, + flags, + ath10k_ps_iter, + &ar_iter); + + ret = ar_iter.ret; + } + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + if (conf->flags & IEEE80211_CONF_MONITOR) + ret = ath10k_monitor_create(ar); + else + ret = ath10k_monitor_destroy(ar); + } + + mutex_unlock(&ar->conf_mutex); + return ret; +} + +/* + * TODO: + * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, + * because we will send mgmt frames without CCK. This requirement + * for P2P_FIND/GO_NEG should be handled by checking CCK flag + * in the TX packet. 
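 * A sketch of that check (assumed shape, not implemented by this
 * patch; IEEE80211_TX_CTL_NO_CCK_RATE is the real mac80211 flag):
 *
 *	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 *	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
 *		exclude_cck_rates(skb);	(hypothetical helper)
 *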
+ */ +static int ath10k_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + enum wmi_sta_powersave_param param; + int ret = 0; + u32 value; + int bit; + + mutex_lock(&ar->conf_mutex); + + arvif->ar = ar; + arvif->vif = vif; + + if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) { + ath10k_warn("Only one monitor interface allowed\n"); + ret = -EBUSY; + goto exit; + } + + bit = ffs(ar->free_vdev_map); + if (bit == 0) { + ret = -EBUSY; + goto exit; + } + + arvif->vdev_id = bit - 1; + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; + ar->free_vdev_map &= ~(1 << arvif->vdev_id); + + if (ar->p2p) + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; + + switch (vif->type) { + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_STATION: + arvif->vdev_type = WMI_VDEV_TYPE_STA; + if (vif->p2p) + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; + break; + case NL80211_IFTYPE_ADHOC: + arvif->vdev_type = WMI_VDEV_TYPE_IBSS; + break; + case NL80211_IFTYPE_AP: + arvif->vdev_type = WMI_VDEV_TYPE_AP; + + if (vif->p2p) + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; + break; + case NL80211_IFTYPE_MONITOR: + arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; + break; + default: + WARN_ON(1); + break; + } + + ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n", + arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype); + + ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, + arvif->vdev_subtype, vif->addr); + if (ret) { + ath10k_warn("WMI vdev create failed: ret %d\n", ret); + goto exit; + } + + ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID, + arvif->def_wep_key_index); + if (ret) + ath10k_warn("Failed to set default keyid: %d\n", ret); + + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + WMI_VDEV_PARAM_TX_ENCAP_TYPE, + ATH10K_HW_TXRX_NATIVE_WIFI); + if (ret) + ath10k_warn("Failed to set TX encap: %d\n", ret); + + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { + ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); + if (ret) { + ath10k_warn("Failed to create peer for AP: %d\n", ret); + goto exit; + } + } + + if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { + param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; + value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + param, value); + if (ret) + ath10k_warn("Failed to set RX wake policy: %d\n", ret); + + param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; + value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + param, value); + if (ret) + ath10k_warn("Failed to set TX wake thresh: %d\n", ret); + + param = WMI_STA_PS_PARAM_PSPOLL_COUNT; + value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + param, value); + if (ret) + ath10k_warn("Failed to set PSPOLL count: %d\n", ret); + } + + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) + ar->monitor_present = true; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static void ath10k_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + int ret; + + mutex_lock(&ar->conf_mutex); + + ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id); + + ar->free_vdev_map |= 1 << (arvif->vdev_id); + + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { + ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); + if (ret) 
+ ath10k_warn("Failed to remove peer for AP: %d\n", ret); + + kfree(arvif->u.ap.noa_data); + } + + ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); + if (ret) + ath10k_warn("WMI vdev delete failed: %d\n", ret); + + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) + ar->monitor_present = false; + + ath10k_peer_cleanup(ar, arvif->vdev_id); + + mutex_unlock(&ar->conf_mutex); +} + +/* + * FIXME: Has to be verified. + */ +#define SUPPORTED_FILTERS \ + (FIF_PROMISC_IN_BSS | \ + FIF_ALLMULTI | \ + FIF_CONTROL | \ + FIF_PSPOLL | \ + FIF_OTHER_BSS | \ + FIF_BCN_PRBRESP_PROMISC | \ + FIF_PROBE_REQ | \ + FIF_FCSFAIL) + +static void ath10k_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct ath10k *ar = hw->priv; + int ret; + + mutex_lock(&ar->conf_mutex); + + changed_flags &= SUPPORTED_FILTERS; + *total_flags &= SUPPORTED_FILTERS; + ar->filter_flags = *total_flags; + + if ((ar->filter_flags & FIF_PROMISC_IN_BSS) && + !ar->monitor_enabled) { + ret = ath10k_monitor_start(ar, ar->monitor_vdev_id); + if (ret) + ath10k_warn("Unable to start monitor mode\n"); + else + ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n"); + } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && + ar->monitor_enabled) { + ret = ath10k_monitor_stop(ar); + if (ret) + ath10k_warn("Unable to stop monitor mode\n"); + else + ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n"); + } + + mutex_unlock(&ar->conf_mutex); +} + +static void ath10k_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + int ret = 0; + + mutex_lock(&ar->conf_mutex); + + if (changed & BSS_CHANGED_IBSS) + ath10k_control_ibss(arvif, info, vif->addr); + + if (changed & BSS_CHANGED_BEACON_INT) { + arvif->beacon_interval = info->beacon_int; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + WMI_VDEV_PARAM_BEACON_INTERVAL, + arvif->beacon_interval); + if (ret) + ath10k_warn("Failed to set beacon interval for VDEV: %d\n", + arvif->vdev_id); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Beacon interval: %d set for VDEV: %d\n", + arvif->beacon_interval, arvif->vdev_id); + } + + if (changed & BSS_CHANGED_BEACON) { + ret = ath10k_wmi_pdev_set_param(ar, + WMI_PDEV_PARAM_BEACON_TX_MODE, + WMI_BEACON_STAGGERED_MODE); + if (ret) + ath10k_warn("Failed to set beacon mode for VDEV: %d\n", + arvif->vdev_id); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Set staggered beacon mode for VDEV: %d\n", + arvif->vdev_id); + } + + if (changed & BSS_CHANGED_BEACON_INFO) { + arvif->dtim_period = info->dtim_period; + + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + WMI_VDEV_PARAM_DTIM_PERIOD, + arvif->dtim_period); + if (ret) + ath10k_warn("Failed to set dtim period for VDEV: %d\n", + arvif->vdev_id); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Set dtim period: %d for VDEV: %d\n", + arvif->dtim_period, arvif->vdev_id); + } + + if (changed & BSS_CHANGED_SSID && + vif->type == NL80211_IFTYPE_AP) { + arvif->u.ap.ssid_len = info->ssid_len; + if (info->ssid_len) + memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); + arvif->u.ap.hidden_ssid = info->hidden_ssid; + } + + if (changed & BSS_CHANGED_BSSID) { + if (!is_zero_ether_addr(info->bssid)) { + ret = ath10k_peer_create(ar, arvif->vdev_id, + info->bssid); + if (ret) + ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", + info->bssid, arvif->vdev_id); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Added peer: %pM for VDEV: 
%d\n", + info->bssid, arvif->vdev_id); + + + if (vif->type == NL80211_IFTYPE_STATION) { + /* + * this is never erased as we it for crypto key + * clearing; this is FW requirement + */ + memcpy(arvif->u.sta.bssid, info->bssid, + ETH_ALEN); + + ret = ath10k_vdev_start(arvif); + if (!ret) + ath10k_dbg(ATH10K_DBG_MAC, + "VDEV: %d started with BSSID: %pM\n", + arvif->vdev_id, info->bssid); + } + + /* + * Mac80211 does not keep IBSS bssid when leaving IBSS, + * so driver need to store it. It is needed when leaving + * IBSS in order to remove BSSID peer. + */ + if (vif->type == NL80211_IFTYPE_ADHOC) + memcpy(arvif->u.ibss.bssid, info->bssid, + ETH_ALEN); + } + } + + if (changed & BSS_CHANGED_BEACON_ENABLED) + ath10k_control_beaconing(arvif, info); + + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + u32 cts_prot; + if (info->use_cts_prot) + cts_prot = 1; + else + cts_prot = 0; + + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + WMI_VDEV_PARAM_ENABLE_RTSCTS, + cts_prot); + if (ret) + ath10k_warn("Failed to set CTS prot for VDEV: %d\n", + arvif->vdev_id); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Set CTS prot: %d for VDEV: %d\n", + cts_prot, arvif->vdev_id); + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + u32 slottime; + if (info->use_short_slot) + slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ + + else + slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ + + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + WMI_VDEV_PARAM_SLOT_TIME, + slottime); + if (ret) + ath10k_warn("Failed to set erp slot for VDEV: %d\n", + arvif->vdev_id); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Set slottime: %d for VDEV: %d\n", + slottime, arvif->vdev_id); + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + u32 preamble; + if (info->use_short_preamble) + preamble = WMI_VDEV_PREAMBLE_SHORT; + else + preamble = WMI_VDEV_PREAMBLE_LONG; + + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + WMI_VDEV_PARAM_PREAMBLE, + preamble); + if (ret) + ath10k_warn("Failed to set preamble for VDEV: %d\n", + arvif->vdev_id); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Set preamble: %d for VDEV: %d\n", + preamble, arvif->vdev_id); + } + + if (changed & BSS_CHANGED_ASSOC) { + if (info->assoc) + ath10k_bss_assoc(hw, vif, info); + } + + mutex_unlock(&ar->conf_mutex); +} + +static int ath10k_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct wmi_start_scan_arg arg; + int ret = 0; + int i; + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + if (ar->scan.in_progress) { + spin_unlock_bh(&ar->data_lock); + ret = -EBUSY; + goto exit; + } + + INIT_COMPLETION(ar->scan.started); + INIT_COMPLETION(ar->scan.completed); + ar->scan.in_progress = true; + ar->scan.aborting = false; + ar->scan.is_roc = false; + ar->scan.vdev_id = arvif->vdev_id; + spin_unlock_bh(&ar->data_lock); + + memset(&arg, 0, sizeof(arg)); + ath10k_wmi_start_scan_init(ar, &arg); + arg.vdev_id = arvif->vdev_id; + arg.scan_id = ATH10K_SCAN_ID; + + if (!req->no_cck) + arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES; + + if (req->ie_len) { + arg.ie_len = req->ie_len; + memcpy(arg.ie, req->ie, arg.ie_len); + } + + if (req->n_ssids) { + arg.n_ssids = req->n_ssids; + for (i = 0; i < arg.n_ssids; i++) { + arg.ssids[i].len = req->ssids[i].ssid_len; + arg.ssids[i].ssid = req->ssids[i].ssid; + } + } + + if (req->n_channels) { + arg.n_channels = req->n_channels; + for (i = 0; i < arg.n_channels; i++) + arg.channels[i] = 
req->channels[i]->center_freq;
+	}
+
+	ret = ath10k_start_scan(ar, &arg);
+	if (ret) {
+		ath10k_warn("could not start hw scan (%d)\n", ret);
+		spin_lock_bh(&ar->data_lock);
+		ar->scan.in_progress = false;
+		spin_unlock_bh(&ar->data_lock);
+	}
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif)
+{
+	struct ath10k *ar = hw->priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+	ret = ath10k_abort_scan(ar);
+	if (ret) {
+		ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n",
+			    ret);
+		ieee80211_scan_completed(hw, 1 /* aborted */);
+	}
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+			  struct ieee80211_key_conf *key)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_peer *peer;
+	const u8 *peer_addr;
+	bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+		      key->cipher == WLAN_CIPHER_SUITE_WEP104;
+	int ret = 0;
+
+	if (key->keyidx > WMI_MAX_KEY_INDEX)
+		return -ENOSPC;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (sta)
+		peer_addr = sta->addr;
+	else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+		peer_addr = vif->bss_conf.bssid;
+	else
+		peer_addr = vif->addr;
+
+	key->hw_key_idx = key->keyidx;
+
+	/* the peer should not disappear midway (unless FW goes awry) since
+	 * we already hold conf_mutex. we just make sure it's there now. */
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!peer) {
+		if (cmd == SET_KEY) {
+			ath10k_warn("cannot install key for non-existent peer %pM\n",
+				    peer_addr);
+			ret = -EOPNOTSUPP;
+			goto exit;
+		} else {
+			/* if the peer doesn't exist there is no key to disable
+			 * anymore */
+			goto exit;
+		}
+	}
+
+	if (is_wep) {
+		if (cmd == SET_KEY)
+			arvif->wep_keys[key->keyidx] = key;
+		else
+			arvif->wep_keys[key->keyidx] = NULL;
+
+		if (cmd == DISABLE_KEY)
+			ath10k_clear_vdev_key(arvif, key);
+	}
+
+	ret = ath10k_install_key(arvif, key, cmd, peer_addr);
+	if (ret) {
+		ath10k_warn("ath10k_install_key failed (%d)\n", ret);
+		goto exit;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+	if (peer && cmd == SET_KEY)
+		peer->keys[key->keyidx] = key;
+	else if (peer && cmd == DISABLE_KEY)
+		peer->keys[key->keyidx] = NULL;
+	else if (peer == NULL)
+		/* impossible unless FW goes crazy */
+		ath10k_warn("peer %pM disappeared!\n", peer_addr);
+	spin_unlock_bh(&ar->data_lock);
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_sta_state(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta,
+			    enum ieee80211_sta_state old_state,
+			    enum ieee80211_sta_state new_state)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	int ret = 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (old_state == IEEE80211_STA_NOTEXIST &&
+	    new_state == IEEE80211_STA_NONE &&
+	    vif->type != NL80211_IFTYPE_STATION) {
+		/*
+		 * New station addition. 
+ */ + ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); + if (ret) + ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", + sta->addr, arvif->vdev_id); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Added peer: %pM for VDEV: %d\n", + sta->addr, arvif->vdev_id); + } else if ((old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST)) { + /* + * Existing station deletion. + */ + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); + if (ret) + ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n", + sta->addr, arvif->vdev_id); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Removed peer: %pM for VDEV: %d\n", + sta->addr, arvif->vdev_id); + + if (vif->type == NL80211_IFTYPE_STATION) + ath10k_bss_disassoc(hw, vif); + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC && + (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC)) { + /* + * New association. + */ + ret = ath10k_station_assoc(ar, arvif, sta); + if (ret) + ath10k_warn("Failed to associate station: %pM\n", + sta->addr); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Station %pM moved to assoc state\n", + sta->addr); + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH && + (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC)) { + /* + * Disassociation. + */ + ret = ath10k_station_disassoc(ar, arvif, sta); + if (ret) + ath10k_warn("Failed to disassociate station: %pM\n", + sta->addr); + else + ath10k_dbg(ATH10K_DBG_MAC, + "Station %pM moved to disassociated state\n", + sta->addr); + } + + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, + u16 ac, bool enable) +{ + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + u32 value = 0; + int ret = 0; + + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + return 0; + + switch (ac) { + case IEEE80211_AC_VO: + value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | + WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; + break; + case IEEE80211_AC_VI: + value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | + WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; + break; + case IEEE80211_AC_BE: + value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | + WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; + break; + case IEEE80211_AC_BK: + value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | + WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; + break; + } + + if (enable) + arvif->u.sta.uapsd |= value; + else + arvif->u.sta.uapsd &= ~value; + + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + WMI_STA_PS_PARAM_UAPSD, + arvif->u.sta.uapsd); + if (ret) { + ath10k_warn("could not set uapsd params %d\n", ret); + goto exit; + } + + if (arvif->u.sta.uapsd) + value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; + else + value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; + + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + WMI_STA_PS_PARAM_RX_WAKE_POLICY, + value); + if (ret) + ath10k_warn("could not set rx wake param %d\n", ret); + +exit: + return ret; +} + +static int ath10k_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct ath10k *ar = hw->priv; + struct wmi_wmm_params_arg *p = NULL; + int ret; + + mutex_lock(&ar->conf_mutex); + + switch (ac) { + case IEEE80211_AC_VO: + p = &ar->wmm_params.ac_vo; + break; + case IEEE80211_AC_VI: + p = &ar->wmm_params.ac_vi; + break; + case IEEE80211_AC_BE: + p = &ar->wmm_params.ac_be; + break; + case IEEE80211_AC_BK: + p = &ar->wmm_params.ac_bk; + break; + } + + if (WARN_ON(!p)) { + ret = -EINVAL; + goto exit; + } + + p->cwmin = 
params->cw_min;
+	p->cwmax = params->cw_max;
+	p->aifs = params->aifs;
+
+	/*
+	 * The channel time duration programmed in the HW is in absolute
+	 * microseconds, while mac80211 gives the txop in units of
+	 * 32 microseconds.
+	 */
+	p->txop = params->txop * 32;
+
+	/* FIXME: FW accepts wmm params per hw, not per vif */
+	ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
+	if (ret) {
+		ath10k_warn("could not set wmm params %d\n", ret);
+		goto exit;
+	}
+
+	ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+	if (ret)
+		ath10k_warn("could not set sta uapsd %d\n", ret);
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+
+static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_channel *chan,
+				    int duration,
+				    enum ieee80211_roc_type type)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct wmi_start_scan_arg arg;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	if (ar->scan.in_progress) {
+		spin_unlock_bh(&ar->data_lock);
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	INIT_COMPLETION(ar->scan.started);
+	INIT_COMPLETION(ar->scan.completed);
+	INIT_COMPLETION(ar->scan.on_channel);
+	ar->scan.in_progress = true;
+	ar->scan.aborting = false;
+	ar->scan.is_roc = true;
+	ar->scan.vdev_id = arvif->vdev_id;
+	ar->scan.roc_freq = chan->center_freq;
+	spin_unlock_bh(&ar->data_lock);
+
+	memset(&arg, 0, sizeof(arg));
+	ath10k_wmi_start_scan_init(ar, &arg);
+	arg.vdev_id = arvif->vdev_id;
+	arg.scan_id = ATH10K_SCAN_ID;
+	arg.n_channels = 1;
+	arg.channels[0] = chan->center_freq;
+	arg.dwell_time_active = duration;
+	arg.dwell_time_passive = duration;
+	arg.max_scan_time = 2 * duration;
+	arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+	arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+
+	ret = ath10k_start_scan(ar, &arg);
+	if (ret) {
+		ath10k_warn("could not start roc scan (%d)\n", ret);
+		spin_lock_bh(&ar->data_lock);
+		ar->scan.in_progress = false;
+		spin_unlock_bh(&ar->data_lock);
+		goto exit;
+	}
+
+	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+	if (ret == 0) {
+		ath10k_warn("could not switch to channel for roc scan\n");
+		ath10k_abort_scan(ar);
+		ret = -ETIMEDOUT;
+		goto exit;
+	}
+
+	ret = 0;
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+	struct ath10k *ar = hw->priv;
+
+	mutex_lock(&ar->conf_mutex);
+	ath10k_abort_scan(ar);
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+/*
+ * Both the RTS and fragmentation thresholds are interface-specific
+ * in ath10k, but device-specific in mac80211. 
+ */
+static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct ath10k_generic_iter *ar_iter = data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
+
+	rts = min_t(u32, rts, ATH10K_RTS_MAX);
+
+	ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
+						 WMI_VDEV_PARAM_RTS_THRESHOLD,
+						 rts);
+	if (ar_iter->ret)
+		ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
+			    arvif->vdev_id);
+	else
+		ath10k_dbg(ATH10K_DBG_MAC,
+			   "Set RTS threshold: %d for VDEV: %d\n",
+			   rts, arvif->vdev_id);
+}
+
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct ath10k_generic_iter ar_iter;
+	struct ath10k *ar = hw->priv;
+
+	memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+	ar_iter.ar = ar;
+
+	mutex_lock(&ar->conf_mutex);
+	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+					    ath10k_set_rts_iter, &ar_iter);
+	mutex_unlock(&ar->conf_mutex);
+
+	return ar_iter.ret;
+}
+
+static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct ath10k_generic_iter *ar_iter = data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
+	int ret;
+
+	frag = clamp_t(u32, frag,
+		       ATH10K_FRAGMT_THRESHOLD_MIN,
+		       ATH10K_FRAGMT_THRESHOLD_MAX);
+
+	ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
+					WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+					frag);
+
+	ar_iter->ret = ret;
+	if (ar_iter->ret)
+		ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
+			    arvif->vdev_id);
+	else
+		ath10k_dbg(ATH10K_DBG_MAC,
+			   "Set frag threshold: %d for VDEV: %d\n",
+			   frag, arvif->vdev_id);
+}
+
+static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct ath10k_generic_iter ar_iter;
+	struct ath10k *ar = hw->priv;
+
+	memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+	ar_iter.ar = ar;
+
+	mutex_lock(&ar->conf_mutex);
+	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+					    ath10k_set_frag_iter, &ar_iter);
+	mutex_unlock(&ar->conf_mutex);
+
+	return ar_iter.ret;
+}
+
+static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+{
+	struct ath10k *ar = hw->priv;
+	int ret;
+
+	/* mac80211 doesn't care if we really xmit queued frames or not;
+	 * we'll collect those frames either way if we stop/delete vdevs */
+	if (drop)
+		return;
+
+	ret = wait_event_timeout(ar->htt->empty_tx_wq, ({
+			bool empty;
+			spin_lock_bh(&ar->htt->tx_lock);
+			empty = bitmap_empty(ar->htt->used_msdu_ids,
+					     ar->htt->max_num_pending_tx);
+			spin_unlock_bh(&ar->htt->tx_lock);
+			(empty);
+		}), ATH10K_FLUSH_TIMEOUT_HZ);
+	if (ret <= 0)
+		ath10k_warn("tx not flushed\n");
+}
+
+/* TODO: Implement this function properly.
+ * For now it is needed to reply to Probe Requests in IBSS mode.
+ * Probably we need this information from FW. 
+ */ +static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) +{ + return 1; +} + +static const struct ieee80211_ops ath10k_ops = { + .tx = ath10k_tx, + .start = ath10k_start, + .stop = ath10k_stop, + .config = ath10k_config, + .add_interface = ath10k_add_interface, + .remove_interface = ath10k_remove_interface, + .configure_filter = ath10k_configure_filter, + .bss_info_changed = ath10k_bss_info_changed, + .hw_scan = ath10k_hw_scan, + .cancel_hw_scan = ath10k_cancel_hw_scan, + .set_key = ath10k_set_key, + .sta_state = ath10k_sta_state, + .conf_tx = ath10k_conf_tx, + .remain_on_channel = ath10k_remain_on_channel, + .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, + .set_rts_threshold = ath10k_set_rts_threshold, + .set_frag_threshold = ath10k_set_frag_threshold, + .flush = ath10k_flush, + .tx_last_beacon = ath10k_tx_last_beacon, +}; + +#define RATETAB_ENT(_rate, _rateid, _flags) { \ + .bitrate = (_rate), \ + .flags = (_flags), \ + .hw_value = (_rateid), \ +} + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = IEEE80211_BAND_2GHZ, \ + .hw_value = (_channel), \ + .center_freq = (_freq), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _freq, _flags) { \ + .band = IEEE80211_BAND_5GHZ, \ + .hw_value = (_channel), \ + .center_freq = (_freq), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static const struct ieee80211_channel ath10k_2ghz_channels[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +static const struct ieee80211_channel ath10k_5ghz_channels[] = { + CHAN5G(36, 5180, 14), + CHAN5G(40, 5200, 15), + CHAN5G(44, 5220, 16), + CHAN5G(48, 5240, 17), + CHAN5G(52, 5260, 18), + CHAN5G(56, 5280, 19), + CHAN5G(60, 5300, 20), + CHAN5G(64, 5320, 21), + CHAN5G(100, 5500, 22), + CHAN5G(104, 5520, 23), + CHAN5G(108, 5540, 24), + CHAN5G(112, 5560, 25), + CHAN5G(116, 5580, 26), + CHAN5G(120, 5600, 27), + CHAN5G(124, 5620, 28), + CHAN5G(128, 5640, 29), + CHAN5G(132, 5660, 30), + CHAN5G(136, 5680, 31), + CHAN5G(140, 5700, 32), + CHAN5G(149, 5745, 33), + CHAN5G(153, 5765, 34), + CHAN5G(157, 5785, 35), + CHAN5G(161, 5805, 36), + CHAN5G(165, 5825, 37), +}; + +static struct ieee80211_rate ath10k_rates[] = { + /* CCK */ + RATETAB_ENT(10, 0x82, 0), + RATETAB_ENT(20, 0x84, 0), + RATETAB_ENT(55, 0x8b, 0), + RATETAB_ENT(110, 0x96, 0), + /* OFDM */ + RATETAB_ENT(60, 0x0c, 0), + RATETAB_ENT(90, 0x12, 0), + RATETAB_ENT(120, 0x18, 0), + RATETAB_ENT(180, 0x24, 0), + RATETAB_ENT(240, 0x30, 0), + RATETAB_ENT(360, 0x48, 0), + RATETAB_ENT(480, 0x60, 0), + RATETAB_ENT(540, 0x6c, 0), +}; + +#define ath10k_a_rates (ath10k_rates + 4) +#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4) +#define ath10k_g_rates (ath10k_rates + 0) +#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) + +struct ath10k *ath10k_mac_create(void) +{ + struct ieee80211_hw *hw; + struct ath10k *ar; + + hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops); + if (!hw) + return NULL; + + ar = hw->priv; + ar->hw = hw; + + return ar; +} + +void ath10k_mac_destroy(struct ath10k *ar) +{ + ieee80211_free_hw(ar->hw); +} + +static const struct ieee80211_iface_limit ath10k_if_limits[] = { + { + .max = 8, + .types = BIT(NL80211_IFTYPE_STATION) + | 
BIT(NL80211_IFTYPE_P2P_CLIENT) + | BIT(NL80211_IFTYPE_P2P_GO) + | BIT(NL80211_IFTYPE_AP) + } +}; + +static const struct ieee80211_iface_combination ath10k_if_comb = { + .limits = ath10k_if_limits, + .n_limits = ARRAY_SIZE(ath10k_if_limits), + .max_interfaces = 8, + .num_different_channels = 1, + .beacon_int_infra_match = true, +}; + +static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) +{ + struct ieee80211_sta_vht_cap vht_cap = {0}; + u16 mcs_map; + + vht_cap.vht_supported = 1; + vht_cap.cap = ar->vht_cap_info; + + /* FIXME: check dynamically how many streams board supports */ + mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; + + vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); + vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); + + return vht_cap; +} + +static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) +{ + int i; + struct ieee80211_sta_ht_cap ht_cap = {0}; + + if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) + return ht_cap; + + ht_cap.ht_supported = 1; + ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; + ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; + ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; + + if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) + ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; + + if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) + ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + + if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { + u32 smps; + + smps = WLAN_HT_CAP_SM_PS_DYNAMIC; + smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; + + ht_cap.cap |= smps; + } + + if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC) + ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; + + if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { + u32 stbc; + + stbc = ar->ht_cap_info; + stbc &= WMI_HT_CAP_RX_STBC; + stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; + stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; + stbc &= IEEE80211_HT_CAP_RX_STBC; + + ht_cap.cap |= stbc; + } + + if (ar->ht_cap_info & WMI_HT_CAP_LDPC) + ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + + if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) + ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; + + /* max AMSDU is implicitly taken from vht_cap_info */ + if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) + ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++) + ht_cap.mcs.rx_mask[i] = 0xFF; + + ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; + + return ht_cap; +} + + +static void ath10k_get_arvif_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath10k_vif_iter *arvif_iter = data; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + + if (arvif->vdev_id == arvif_iter->vdev_id) + arvif_iter->arvif = arvif; +} + +struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) +{ + struct ath10k_vif_iter arvif_iter; + u32 flags; + + memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); + arvif_iter.vdev_id = vdev_id; + + flags = IEEE80211_IFACE_ITER_RESUME_ALL; + ieee80211_iterate_active_interfaces_atomic(ar->hw, + flags, + ath10k_get_arvif_iter, + &arvif_iter); + if (!arvif_iter.arvif) { + ath10k_warn("No VIF found for VDEV: %d\n", vdev_id); + return NULL; + } + + return arvif_iter.arvif; +} 
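
As a usage sketch (not part of the patch): ath10k_get_arvif() is the helper event code would use to map a firmware vdev id back to its mac80211 interface. The handler below is illustrative only; its name and the idea that the event carries a vdev_id are assumptions, not ath10k API.

	/* illustrative sketch, not patch content */
	static void example_handle_vdev_event(struct ath10k *ar, u32 vdev_id)
	{
		struct ath10k_vif *arvif;

		/* ath10k_get_arvif() uses the atomic interface iterator
		 * internally, so this is usable from the event path too */
		arvif = ath10k_get_arvif(ar, vdev_id);
		if (!arvif)
			return; /* interface already gone; drop the event */

		ath10k_dbg(ATH10K_DBG_MAC, "event for vdev %d\n",
			   arvif->vdev_id);
	}
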
+ +int ath10k_mac_register(struct ath10k *ar) +{ + struct ieee80211_supported_band *band; + struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_sta_ht_cap ht_cap; + void *channels; + int ret; + + SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); + + SET_IEEE80211_DEV(ar->hw, ar->dev); + + ht_cap = ath10k_get_ht_cap(ar); + vht_cap = ath10k_create_vht_cap(ar); + + if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { + channels = kmemdup(ath10k_2ghz_channels, + sizeof(ath10k_2ghz_channels), + GFP_KERNEL); + if (!channels) + return -ENOMEM; + + band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; + band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); + band->channels = channels; + band->n_bitrates = ath10k_g_rates_size; + band->bitrates = ath10k_g_rates; + band->ht_cap = ht_cap; + + /* vht is not supported in 2.4 GHz */ + + ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; + } + + if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { + channels = kmemdup(ath10k_5ghz_channels, + sizeof(ath10k_5ghz_channels), + GFP_KERNEL); + if (!channels) { + if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { + band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; + kfree(band->channels); + } + return -ENOMEM; + } + + band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; + band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels); + band->channels = channels; + band->n_bitrates = ath10k_a_rates_size; + band->bitrates = ath10k_a_rates; + band->ht_cap = ht_cap; + band->vht_cap = vht_cap; + ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band; + } + + ar->hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); + + ar->hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS | + IEEE80211_HW_SUPPORTS_UAPSD | + IEEE80211_HW_MFP_CAPABLE | + IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_HAS_RATE_CONTROL | + IEEE80211_HW_SUPPORTS_STATIC_SMPS | + IEEE80211_HW_WANT_MONITOR_VIF | + IEEE80211_HW_AP_LINK_PS; + + if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) + ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS; + + if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) { + ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW; + } + + ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; + ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; + + ar->hw->vif_data_size = sizeof(struct ath10k_vif); + + ar->hw->channel_change_time = 5000; + ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; + + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + ar->hw->wiphy->max_remain_on_channel_duration = 5000; + + ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + /* + * on LL hardware queues are managed entirely by the FW + * so we only advertise to mac we can do the queues thing + */ + ar->hw->queues = 4; + + ar->hw->wiphy->iface_combinations = &ath10k_if_comb; + ar->hw->wiphy->n_iface_combinations = 1; + + ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, + ath10k_reg_notifier); + if (ret) { + ath10k_err("Regulatory initialization failed\n"); + return ret; + } + + ret = ieee80211_register_hw(ar->hw); + if (ret) { + ath10k_err("ieee80211 registration failed: %d\n", ret); + return ret; + } + + if (!ath_is_world_regd(&ar->ath_common.regulatory)) { + ret = regulatory_hint(ar->hw->wiphy, + ar->ath_common.regulatory.alpha2); + if (ret) + goto exit; + } + + return 0; +exit: + ieee80211_unregister_hw(ar->hw); + return ret; +} 
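
For context, a sketch of how a caller might pair ath10k_mac_register() with ath10k_mac_unregister() when a later setup step fails; per the code below, unregister is also what frees the kmemdup'ed per-band channel tables. example_attach_target() and example_core_register() are hypothetical stand-ins, not functions from this patch.

	/* illustrative sketch, not patch content */
	static int example_attach_target(struct ath10k *ar); /* hypothetical */

	static int example_core_register(struct ath10k *ar)
	{
		int ret;

		ret = ath10k_mac_register(ar);
		if (ret)
			return ret;

		ret = example_attach_target(ar);
		if (ret) {
			/* also releases the per-band channel arrays */
			ath10k_mac_unregister(ar);
			return ret;
		}

		return 0;
	}
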
+ +void ath10k_mac_unregister(struct ath10k *ar) +{ + ieee80211_unregister_hw(ar->hw); + + kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); + + SET_IEEE80211_DEV(ar->hw, NULL); +} diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h new file mode 100644 index 000000000000..27fc92e58829 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _MAC_H_ +#define _MAC_H_ + +#include <net/mac80211.h> +#include "core.h" + +struct ath10k_generic_iter { + struct ath10k *ar; + int ret; +}; + +struct ath10k *ath10k_mac_create(void); +void ath10k_mac_destroy(struct ath10k *ar); +int ath10k_mac_register(struct ath10k *ar); +void ath10k_mac_unregister(struct ath10k *ar); +struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id); +void ath10k_reset_scan(unsigned long ptr); +void ath10k_offchan_tx_purge(struct ath10k *ar); +void ath10k_offchan_tx_work(struct work_struct *work); + +static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) +{ + return (struct ath10k_vif *)vif->drv_priv; +} + +static inline void ath10k_tx_h_seq_no(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_vif *vif = info->control.vif; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { + if (arvif->tx_seq_no == 0) + arvif->tx_seq_no = 0x1000; + + if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) + arvif->tx_seq_no += 0x10; + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no); + } +} + +#endif /* _MAC_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c new file mode 100644 index 000000000000..8e4e8327d33e --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -0,0 +1,2506 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> + +#include "core.h" +#include "debug.h" + +#include "targaddrs.h" +#include "bmi.h" + +#include "hif.h" +#include "htc.h" + +#include "ce.h" +#include "pci.h" + +unsigned int ath10k_target_ps; +module_param(ath10k_target_ps, uint, 0644); +MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); + +#define QCA988X_1_0_DEVICE_ID (0xabcd) +#define QCA988X_2_0_DEVICE_ID (0x003c) + +static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { + { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */ + { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ + {0} +}; + +static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, + u32 *data); + +static void ath10k_pci_process_ce(struct ath10k *ar); +static int ath10k_pci_post_rx(struct ath10k *ar); +static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, + int num); +static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info); +static void ath10k_pci_stop_ce(struct ath10k *ar); + +static const struct ce_attr host_ce_config_wlan[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,}, + /* could be moved to share CE3 */ + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* unused */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* Target autonomous hif_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, +}; + +/* Target firmware's Copy Engine configuration. */ +static const struct ce_pipe_config target_ce_config_wlan[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,}, + /* target->host HTT + HTC control */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,}, + /* target->host WMI */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,}, + /* NB: 50% of src nentries, since tx has 2 frags */ + /* unused */ + { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Reserved for target autonomous hif_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ +}; + +/* + * Diagnostic read/write access is provided for startup/config/debug usage. + * Caller must guarantee proper alignment, when applicable, and single user + * at any moment. 
+ */ +static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, + int nbytes) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret = 0; + u32 buf; + unsigned int completed_nbytes, orig_nbytes, remaining_bytes; + unsigned int id; + unsigned int flags; + struct ce_state *ce_diag; + /* Host buffer address in CE space */ + u32 ce_data; + dma_addr_t ce_data_base = 0; + void *data_buf = NULL; + int i; + + /* + * This code cannot handle reads to non-memory space. Redirect to the + * register read fn but preserve the multi word read capability of + * this fn + */ + if (address < DRAM_BASE_ADDRESS) { + if (!IS_ALIGNED(address, 4) || + !IS_ALIGNED((unsigned long)data, 4)) + return -EIO; + + while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access( + ar, address, (u32 *)data)) == 0)) { + nbytes -= sizeof(u32); + address += sizeof(u32); + data += sizeof(u32); + } + return ret; + } + + ce_diag = ar_pci->ce_diag; + + /* + * Allocate a temporary bounce buffer to hold caller's data + * to be DMA'ed from Target. This guarantees + * 1) 4-byte alignment + * 2) Buffer in DMA-able space + */ + orig_nbytes = nbytes; + data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, + orig_nbytes, + &ce_data_base); + + if (!data_buf) { + ret = -ENOMEM; + goto done; + } + memset(data_buf, 0, orig_nbytes); + + remaining_bytes = orig_nbytes; + ce_data = ce_data_base; + while (remaining_bytes) { + nbytes = min_t(unsigned int, remaining_bytes, + DIAG_TRANSFER_LIMIT); + + ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data); + if (ret != 0) + goto done; + + /* Request CE to send from Target(!) address to Host buffer */ + /* + * The address supplied by the caller is in the + * Target CPU virtual address space. + * + * In order to use this address with the diagnostic CE, + * convert it from Target CPU virtual address space + * to CE address space + */ + ath10k_pci_wake(ar); + address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, + address); + ath10k_pci_sleep(ar); + + ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, + 0); + if (ret) + goto done; + + i = 0; + while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf, + &completed_nbytes, + &id) != 0) { + mdelay(1); + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + ret = -EBUSY; + goto done; + } + } + + if (nbytes != completed_nbytes) { + ret = -EIO; + goto done; + } + + if (buf != (u32) address) { + ret = -EIO; + goto done; + } + + i = 0; + while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf, + &completed_nbytes, + &id, &flags) != 0) { + mdelay(1); + + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + ret = -EBUSY; + goto done; + } + } + + if (nbytes != completed_nbytes) { + ret = -EIO; + goto done; + } + + if (buf != ce_data) { + ret = -EIO; + goto done; + } + + remaining_bytes -= nbytes; + address += nbytes; + ce_data += nbytes; + } + +done: + if (ret == 0) { + /* Copy data from allocated DMA buf to caller's buf */ + WARN_ON_ONCE(orig_nbytes & 3); + for (i = 0; i < orig_nbytes / sizeof(__le32); i++) { + ((u32 *)data)[i] = + __le32_to_cpu(((__le32 *)data_buf)[i]); + } + } else + ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", + __func__, address); + + if (data_buf) + pci_free_consistent(ar_pci->pdev, orig_nbytes, + data_buf, ce_data_base); + + return ret; +} + +/* Read 4-byte aligned data from Target memory or register */ +static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, + u32 *data) +{ + /* Assume range doesn't cross this boundary */ + if (address >= DRAM_BASE_ADDRESS) + return ath10k_pci_diag_read_mem(ar, 
address, data, sizeof(u32)); + + ath10k_pci_wake(ar); + *data = ath10k_pci_read32(ar, address); + ath10k_pci_sleep(ar); + return 0; +} + +static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + const void *data, int nbytes) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret = 0; + u32 buf; + unsigned int completed_nbytes, orig_nbytes, remaining_bytes; + unsigned int id; + unsigned int flags; + struct ce_state *ce_diag; + void *data_buf = NULL; + u32 ce_data; /* Host buffer address in CE space */ + dma_addr_t ce_data_base = 0; + int i; + + ce_diag = ar_pci->ce_diag; + + /* + * Allocate a temporary bounce buffer to hold caller's data + * to be DMA'ed to Target. This guarantees + * 1) 4-byte alignment + * 2) Buffer in DMA-able space + */ + orig_nbytes = nbytes; + data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, + orig_nbytes, + &ce_data_base); + if (!data_buf) { + ret = -ENOMEM; + goto done; + } + + /* Copy caller's data to allocated DMA buf */ + WARN_ON_ONCE(orig_nbytes & 3); + for (i = 0; i < orig_nbytes / sizeof(__le32); i++) + ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]); + + /* + * The address supplied by the caller is in the + * Target CPU virtual address space. + * + * In order to use this address with the diagnostic CE, + * convert it from + * Target CPU virtual address space + * to + * CE address space + */ + ath10k_pci_wake(ar); + address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); + ath10k_pci_sleep(ar); + + remaining_bytes = orig_nbytes; + ce_data = ce_data_base; + while (remaining_bytes) { + /* FIXME: check cast */ + nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); + + /* Set up to receive directly into Target(!) address */ + ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address); + if (ret != 0) + goto done; + + /* + * Request CE to send caller-supplied data that + * was copied to bounce buffer to Target(!) address. 
+ */ + ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data, + nbytes, 0, 0); + if (ret != 0) + goto done; + + i = 0; + while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf, + &completed_nbytes, + &id) != 0) { + mdelay(1); + + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + ret = -EBUSY; + goto done; + } + } + + if (nbytes != completed_nbytes) { + ret = -EIO; + goto done; + } + + if (buf != ce_data) { + ret = -EIO; + goto done; + } + + i = 0; + while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf, + &completed_nbytes, + &id, &flags) != 0) { + mdelay(1); + + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + ret = -EBUSY; + goto done; + } + } + + if (nbytes != completed_nbytes) { + ret = -EIO; + goto done; + } + + if (buf != address) { + ret = -EIO; + goto done; + } + + remaining_bytes -= nbytes; + address += nbytes; + ce_data += nbytes; + } + +done: + if (data_buf) { + pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf, + ce_data_base); + } + + if (ret != 0) + ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__, + address); + + return ret; +} + +/* Write 4B data to Target memory or register */ +static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address, + u32 data) +{ + /* Assume range doesn't cross this boundary */ + if (address >= DRAM_BASE_ADDRESS) + return ath10k_pci_diag_write_mem(ar, address, &data, + sizeof(u32)); + + ath10k_pci_wake(ar); + ath10k_pci_write32(ar, address, data); + ath10k_pci_sleep(ar); + return 0; +} + +static bool ath10k_pci_target_is_awake(struct ath10k *ar) +{ + void __iomem *mem = ath10k_pci_priv(ar)->mem; + u32 val; + val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS); + return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON); +} + +static void ath10k_pci_wait(struct ath10k *ar) +{ + int n = 100; + + while (n-- && !ath10k_pci_target_is_awake(ar)) + msleep(10); + + if (n < 0) + ath10k_warn("Unable to wakeup target\n"); +} + +void ath10k_do_pci_wake(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + void __iomem *pci_addr = ar_pci->mem; + int tot_delay = 0; + int curr_delay = 5; + + if (atomic_read(&ar_pci->keep_awake_count) == 0) { + /* Force AWAKE */ + iowrite32(PCIE_SOC_WAKE_V_MASK, + pci_addr + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS); + } + atomic_inc(&ar_pci->keep_awake_count); + + if (ar_pci->verified_awake) + return; + + for (;;) { + if (ath10k_pci_target_is_awake(ar)) { + ar_pci->verified_awake = true; + break; + } + + if (tot_delay > PCIE_WAKE_TIMEOUT) { + ath10k_warn("target takes too long to wake up (awake count %d)\n", + atomic_read(&ar_pci->keep_awake_count)); + break; + } + + udelay(curr_delay); + tot_delay += curr_delay; + + if (curr_delay < 50) + curr_delay += 5; + } +} + +void ath10k_do_pci_sleep(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + void __iomem *pci_addr = ar_pci->mem; + + if (atomic_dec_and_test(&ar_pci->keep_awake_count)) { + /* Allow sleep */ + ar_pci->verified_awake = false; + iowrite32(PCIE_SOC_WAKE_RESET, + pci_addr + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS); + } +} + +/* + * FIXME: Handle OOM properly. 
+ */ +static inline +struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info) +{ + struct ath10k_pci_compl *compl = NULL; + + spin_lock_bh(&pipe_info->pipe_lock); + if (list_empty(&pipe_info->compl_free)) { + ath10k_warn("Completion buffers are full\n"); + goto exit; + } + compl = list_first_entry(&pipe_info->compl_free, + struct ath10k_pci_compl, list); + list_del(&compl->list); +exit: + spin_unlock_bh(&pipe_info->pipe_lock); + return compl; +} + +/* Called by lower (CE) layer when a send to Target completes. */ +static void ath10k_pci_ce_send_done(struct ce_state *ce_state, + void *transfer_context, + u32 ce_data, + unsigned int nbytes, + unsigned int transfer_id) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; + struct ath10k_pci_compl *compl; + bool process = false; + + do { + /* + * For the send completion of an item in sendlist, just + * increment num_sends_allowed. The upper layer callback will + * be triggered when last fragment is done with send. + */ + if (transfer_context == CE_SENDLIST_ITEM_CTXT) { + spin_lock_bh(&pipe_info->pipe_lock); + pipe_info->num_sends_allowed++; + spin_unlock_bh(&pipe_info->pipe_lock); + continue; + } + + compl = get_free_compl(pipe_info); + if (!compl) + break; + + compl->send_or_recv = HIF_CE_COMPLETE_SEND; + compl->ce_state = ce_state; + compl->pipe_info = pipe_info; + compl->transfer_context = transfer_context; + compl->nbytes = nbytes; + compl->transfer_id = transfer_id; + compl->flags = 0; + + /* + * Add the completion to the processing queue. + */ + spin_lock_bh(&ar_pci->compl_lock); + list_add_tail(&compl->list, &ar_pci->compl_process); + spin_unlock_bh(&ar_pci->compl_lock); + + process = true; + } while (ath10k_ce_completed_send_next(ce_state, + &transfer_context, + &ce_data, &nbytes, + &transfer_id) == 0); + + /* + * If only some of the items within a sendlist have completed, + * don't invoke completion processing until the entire sendlist + * has been sent. + */ + if (!process) + return; + + ath10k_pci_process_ce(ar); +} + +/* Called by lower (CE) layer when data is received from the Target. */ +static void ath10k_pci_ce_recv_data(struct ce_state *ce_state, + void *transfer_context, u32 ce_data, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; + struct ath10k_pci_compl *compl; + struct sk_buff *skb; + + do { + compl = get_free_compl(pipe_info); + if (!compl) + break; + + compl->send_or_recv = HIF_CE_COMPLETE_RECV; + compl->ce_state = ce_state; + compl->pipe_info = pipe_info; + compl->transfer_context = transfer_context; + compl->nbytes = nbytes; + compl->transfer_id = transfer_id; + compl->flags = flags; + + skb = transfer_context; + dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + /* + * Add the completion to the processing queue. 
+ */ + spin_lock_bh(&ar_pci->compl_lock); + list_add_tail(&compl->list, &ar_pci->compl_process); + spin_unlock_bh(&ar_pci->compl_lock); + + } while (ath10k_ce_completed_recv_next(ce_state, + &transfer_context, + &ce_data, &nbytes, + &transfer_id, + &flags) == 0); + + ath10k_pci_process_ce(ar); +} + +/* Send the first nbytes bytes of the buffer */ +static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, + unsigned int transfer_id, + unsigned int bytes, struct sk_buff *nbuf) +{ + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf); + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]); + struct ce_state *ce_hdl = pipe_info->ce_hdl; + struct ce_sendlist sendlist; + unsigned int len; + u32 flags = 0; + int ret; + + memset(&sendlist, 0, sizeof(struct ce_sendlist)); + + len = min(bytes, nbuf->len); + bytes -= len; + + if (len & 3) + ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len); + + ath10k_dbg(ATH10K_DBG_PCI, + "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n", + nbuf->data, (unsigned long long) skb_cb->paddr, + nbuf->len, len); + ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, + "ath10k tx: data: ", + nbuf->data, nbuf->len); + + ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags); + + /* Make sure we have resources to handle this request */ + spin_lock_bh(&pipe_info->pipe_lock); + if (!pipe_info->num_sends_allowed) { + ath10k_warn("Pipe: %d is full\n", pipe_id); + spin_unlock_bh(&pipe_info->pipe_lock); + return -ENOSR; + } + pipe_info->num_sends_allowed--; + spin_unlock_bh(&pipe_info->pipe_lock); + + ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); + if (ret) + ath10k_warn("CE send failed: %p\n", nbuf); + + return ret; +} + +static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]); + int ret; + + spin_lock_bh(&pipe_info->pipe_lock); + ret = pipe_info->num_sends_allowed; + spin_unlock_bh(&pipe_info->pipe_lock); + + return ret; +} + +static void ath10k_pci_hif_dump_area(struct ath10k *ar) +{ + u32 reg_dump_area = 0; + u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; + u32 host_addr; + int ret; + u32 i; + + ath10k_err("firmware crashed!\n"); + ath10k_err("hardware name %s version 0x%x\n", + ar->hw_params.name, ar->target_version); + ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major, + ar->fw_version_minor, ar->fw_version_release, + ar->fw_version_build); + + host_addr = host_interest_item_address(HI_ITEM(hi_failure_state)); + if (ath10k_pci_diag_read_mem(ar, host_addr, + ®_dump_area, sizeof(u32)) != 0) { + ath10k_warn("could not read hi_failure_state\n"); + return; + } + + ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area); + + ret = ath10k_pci_diag_read_mem(ar, reg_dump_area, + ®_dump_values[0], + REG_DUMP_COUNT_QCA988X * sizeof(u32)); + if (ret != 0) { + ath10k_err("could not dump FW Dump Area\n"); + return; + } + + BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); + + ath10k_err("target Register Dump\n"); + for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) + ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", + i, + reg_dump_values[i], + reg_dump_values[i + 1], + reg_dump_values[i + 2], + reg_dump_values[i + 3]); +} + +static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, + int force) +{ + if (!force) { + int resources; + /* + * Decide whether to actually poll for completions, or just 
+ * wait for a later chance. + * If there seem to be plenty of resources left, then just wait + * since checking involves reading a CE register, which is a + * relatively expensive operation. + */ + resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); + + /* + * If at least 50% of the total resources are still available, + * don't bother checking again yet. + */ + if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) + return; + } + ath10k_ce_per_engine_service(ar, pipe); +} + +static void ath10k_pci_hif_post_init(struct ath10k *ar, + struct ath10k_hif_cb *callbacks) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); + + memcpy(&ar_pci->msg_callbacks_current, callbacks, + sizeof(ar_pci->msg_callbacks_current)); +} + +static int ath10k_pci_start_ce(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ce_state *ce_diag = ar_pci->ce_diag; + const struct ce_attr *attr; + struct hif_ce_pipe_info *pipe_info; + struct ath10k_pci_compl *compl; + int i, pipe_num, completions, disable_interrupts; + + spin_lock_init(&ar_pci->compl_lock); + INIT_LIST_HEAD(&ar_pci->compl_process); + + for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { + pipe_info = &ar_pci->pipe_info[pipe_num]; + + spin_lock_init(&pipe_info->pipe_lock); + INIT_LIST_HEAD(&pipe_info->compl_free); + + /* Handle Diagnostic CE specially */ + if (pipe_info->ce_hdl == ce_diag) + continue; + + attr = &host_ce_config_wlan[pipe_num]; + completions = 0; + + if (attr->src_nentries) { + disable_interrupts = attr->flags & CE_ATTR_DIS_INTR; + ath10k_ce_send_cb_register(pipe_info->ce_hdl, + ath10k_pci_ce_send_done, + disable_interrupts); + completions += attr->src_nentries; + pipe_info->num_sends_allowed = attr->src_nentries - 1; + } + + if (attr->dest_nentries) { + ath10k_ce_recv_cb_register(pipe_info->ce_hdl, + ath10k_pci_ce_recv_data); + completions += attr->dest_nentries; + } + + if (completions == 0) + continue; + + for (i = 0; i < completions; i++) { + compl = kmalloc(sizeof(struct ath10k_pci_compl), + GFP_KERNEL); + if (!compl) { + ath10k_warn("No memory for completion state\n"); + ath10k_pci_stop_ce(ar); + return -ENOMEM; + } + + compl->send_or_recv = HIF_CE_COMPLETE_FREE; + list_add_tail(&compl->list, &pipe_info->compl_free); + } + } + + return 0; +} + +static void ath10k_pci_stop_ce(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_pci_compl *compl; + struct sk_buff *skb; + int i; + + ath10k_ce_disable_interrupts(ar); + + /* Cancel the pending tasklet */ + tasklet_kill(&ar_pci->intr_tq); + + for (i = 0; i < CE_COUNT; i++) + tasklet_kill(&ar_pci->pipe_info[i].intr); + + /* Mark pending completions as aborted, so that upper layers free up + * their associated resources */ + spin_lock_bh(&ar_pci->compl_lock); + list_for_each_entry(compl, &ar_pci->compl_process, list) { + skb = (struct sk_buff *)compl->transfer_context; + ATH10K_SKB_CB(skb)->is_aborted = true; + } + spin_unlock_bh(&ar_pci->compl_lock); +} + +static void ath10k_pci_cleanup_ce(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_pci_compl *compl, *tmp; + struct hif_ce_pipe_info *pipe_info; + struct sk_buff *netbuf; + int pipe_num; + + /* Free pending completions. */ + spin_lock_bh(&ar_pci->compl_lock); + if (!list_empty(&ar_pci->compl_process)) + ath10k_warn("pending completions still present! 
possible memory leaks.\n"); + + list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) { + list_del(&compl->list); + netbuf = (struct sk_buff *)compl->transfer_context; + dev_kfree_skb_any(netbuf); + kfree(compl); + } + spin_unlock_bh(&ar_pci->compl_lock); + + /* Free unused completions for each pipe. */ + for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { + pipe_info = &ar_pci->pipe_info[pipe_num]; + + spin_lock_bh(&pipe_info->pipe_lock); + list_for_each_entry_safe(compl, tmp, + &pipe_info->compl_free, list) { + list_del(&compl->list); + kfree(compl); + } + spin_unlock_bh(&pipe_info->pipe_lock); + } +} + +static void ath10k_pci_process_ce(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ar->hif.priv; + struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; + struct ath10k_pci_compl *compl; + struct sk_buff *skb; + unsigned int nbytes; + int ret, send_done = 0; + + /* Upper layers aren't ready to handle tx/rx completions in parallel so + * we must serialize all completion processing. */ + + spin_lock_bh(&ar_pci->compl_lock); + if (ar_pci->compl_processing) { + spin_unlock_bh(&ar_pci->compl_lock); + return; + } + ar_pci->compl_processing = true; + spin_unlock_bh(&ar_pci->compl_lock); + + for (;;) { + spin_lock_bh(&ar_pci->compl_lock); + if (list_empty(&ar_pci->compl_process)) { + spin_unlock_bh(&ar_pci->compl_lock); + break; + } + compl = list_first_entry(&ar_pci->compl_process, + struct ath10k_pci_compl, list); + list_del(&compl->list); + spin_unlock_bh(&ar_pci->compl_lock); + + if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) { + cb->tx_completion(ar, + compl->transfer_context, + compl->transfer_id); + send_done = 1; + } else { + ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1); + if (ret) { + ath10k_warn("Unable to post recv buffer for pipe: %d\n", + compl->pipe_info->pipe_num); + break; + } + + skb = (struct sk_buff *)compl->transfer_context; + nbytes = compl->nbytes; + + ath10k_dbg(ATH10K_DBG_PCI, + "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n", + skb, nbytes); + ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, + "ath10k rx: ", skb->data, nbytes); + + if (skb->len + skb_tailroom(skb) >= nbytes) { + skb_trim(skb, 0); + skb_put(skb, nbytes); + cb->rx_completion(ar, skb, + compl->pipe_info->pipe_num); + } else { + ath10k_warn("rxed more than expected (nbytes %d, max %d)", + nbytes, + skb->len + skb_tailroom(skb)); + } + } + + compl->send_or_recv = HIF_CE_COMPLETE_FREE; + + /* + * Add completion back to the pipe's free list. + */ + spin_lock_bh(&compl->pipe_info->pipe_lock); + list_add_tail(&compl->list, &compl->pipe_info->compl_free); + compl->pipe_info->num_sends_allowed += send_done; + spin_unlock_bh(&compl->pipe_info->pipe_lock); + } + + spin_lock_bh(&ar_pci->compl_lock); + ar_pci->compl_processing = false; + spin_unlock_bh(&ar_pci->compl_lock); +} + +/* TODO - temporary mapping while we have too few CE's */ +static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, + u16 service_id, u8 *ul_pipe, + u8 *dl_pipe, int *ul_is_polled, + int *dl_is_polled) +{ + int ret = 0; + + /* polling for received messages not supported */ + *dl_is_polled = 0; + + switch (service_id) { + case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: + /* + * Host->target HTT gets its own pipe, so it can be polled + * while other pipes are interrupt driven. + */ + *ul_pipe = 4; + /* + * Use the same target->host pipe for HTC ctrl, HTC raw + * streams, and HTT. 
+ */ + *dl_pipe = 1; + break; + + case ATH10K_HTC_SVC_ID_RSVD_CTRL: + case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS: + /* + * Note: HTC_RAW_STREAMS_SVC is currently unused, and + * HTC_CTRL_RSVD_SVC could share the same pipe as the + * WMI services. So, if another CE is needed, change + * this to *ul_pipe = 3, which frees up CE 0. + */ + /* *ul_pipe = 3; */ + *ul_pipe = 0; + *dl_pipe = 1; + break; + + case ATH10K_HTC_SVC_ID_WMI_DATA_BK: + case ATH10K_HTC_SVC_ID_WMI_DATA_BE: + case ATH10K_HTC_SVC_ID_WMI_DATA_VI: + case ATH10K_HTC_SVC_ID_WMI_DATA_VO: + + case ATH10K_HTC_SVC_ID_WMI_CONTROL: + *ul_pipe = 3; + *dl_pipe = 2; + break; + + /* pipe 5 unused */ + /* pipe 6 reserved */ + /* pipe 7 reserved */ + + default: + ret = -1; + break; + } + *ul_is_polled = + (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0; + + return ret; +} + +static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + int ul_is_polled, dl_is_polled; + + (void)ath10k_pci_hif_map_service_to_pipe(ar, + ATH10K_HTC_SVC_ID_RSVD_CTRL, + ul_pipe, + dl_pipe, + &ul_is_polled, + &dl_is_polled); +} + +static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, + int num) +{ + struct ath10k *ar = pipe_info->hif_ce_state; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ce_state *ce_state = pipe_info->ce_hdl; + struct sk_buff *skb; + dma_addr_t ce_data; + int i, ret = 0; + + if (pipe_info->buf_sz == 0) + return 0; + + for (i = 0; i < num; i++) { + skb = dev_alloc_skb(pipe_info->buf_sz); + if (!skb) { + ath10k_warn("could not allocate skbuff for pipe %d\n", + num); + ret = -ENOMEM; + goto err; + } + + WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); + + ce_data = dma_map_single(ar->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + if (unlikely(dma_mapping_error(ar->dev, ce_data))) { + ath10k_warn("could not dma map skbuff\n"); + dev_kfree_skb_any(skb); + ret = -EIO; + goto err; + } + + ATH10K_SKB_CB(skb)->paddr = ce_data; + + pci_dma_sync_single_for_device(ar_pci->pdev, ce_data, + pipe_info->buf_sz, + PCI_DMA_FROMDEVICE); + + ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb, + ce_data); + if (ret) { + ath10k_warn("could not enqueue to pipe %d (%d)\n", + num, ret); + goto err; + } + } + + return ret; + +err: + ath10k_pci_rx_pipe_cleanup(pipe_info); + return ret; +} + +static int ath10k_pci_post_rx(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct hif_ce_pipe_info *pipe_info; + const struct ce_attr *attr; + int pipe_num, ret = 0; + + for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { + pipe_info = &ar_pci->pipe_info[pipe_num]; + attr = &host_ce_config_wlan[pipe_num]; + + if (attr->dest_nentries == 0) + continue; + + ret = ath10k_pci_post_rx_pipe(pipe_info, + attr->dest_nentries - 1); + if (ret) { + ath10k_warn("Unable to replenish recv buffers for pipe: %d\n", + pipe_num); + + for (; pipe_num >= 0; pipe_num--) { + pipe_info = &ar_pci->pipe_info[pipe_num]; + ath10k_pci_rx_pipe_cleanup(pipe_info); + } + return ret; + } + } + + return 0; +} + +static int ath10k_pci_hif_start(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + ret = ath10k_pci_start_ce(ar); + if (ret) { + ath10k_warn("could not start CE (%d)\n", ret); + return ret; + } + + /* Post buffers once to start things off. 
+	 */
+	ret = ath10k_pci_post_rx(ar);
+	if (ret) {
+		ath10k_warn("could not post rx pipes (%d)\n", ret);
+		return ret;
+	}
+
+	ar_pci->started = 1;
+	return 0;
+}
+
+static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+{
+	struct ath10k *ar;
+	struct ath10k_pci *ar_pci;
+	struct ce_state *ce_hdl;
+	u32 buf_sz;
+	struct sk_buff *netbuf;
+	u32 ce_data;
+
+	buf_sz = pipe_info->buf_sz;
+
+	/* Unused Copy Engine */
+	if (buf_sz == 0)
+		return;
+
+	ar = pipe_info->hif_ce_state;
+	ar_pci = ath10k_pci_priv(ar);
+
+	if (!ar_pci->started)
+		return;
+
+	ce_hdl = pipe_info->ce_hdl;
+
+	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
+					  &ce_data) == 0) {
+		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
+				 netbuf->len + skb_tailroom(netbuf),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(netbuf);
+	}
+}
+
+static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+{
+	struct ath10k *ar;
+	struct ath10k_pci *ar_pci;
+	struct ce_state *ce_hdl;
+	struct sk_buff *netbuf;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int id;
+	u32 buf_sz;
+
+	buf_sz = pipe_info->buf_sz;
+
+	/* Unused Copy Engine */
+	if (buf_sz == 0)
+		return;
+
+	ar = pipe_info->hif_ce_state;
+	ar_pci = ath10k_pci_priv(ar);
+
+	if (!ar_pci->started)
+		return;
+
+	ce_hdl = pipe_info->ce_hdl;
+
+	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
+					  &ce_data, &nbytes, &id) == 0) {
+		if (netbuf != CE_SENDLIST_ITEM_CTXT)
+			/*
+			 * Indicate the completion to higher layers to free
+			 * the buffer.
+			 */
+			ATH10K_SKB_CB(netbuf)->is_aborted = true;
+		ar_pci->msg_callbacks_current.tx_completion(ar,
+							    netbuf,
+							    id);
+	}
+}
+
+/*
+ * Cleanup residual buffers for device shutdown:
+ *    buffers that were enqueued for receive
+ *    buffers that were to be sent
+ * Note: Buffers that had completed but which were
+ * not yet processed are on a completion queue. They
+ * are handled when the completion thread shuts down.
+ */
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int pipe_num;
+
+	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+		struct hif_ce_pipe_info *pipe_info;
+
+		pipe_info = &ar_pci->pipe_info[pipe_num];
+		ath10k_pci_rx_pipe_cleanup(pipe_info);
+		ath10k_pci_tx_pipe_cleanup(pipe_info);
+	}
+}
+
+static void ath10k_pci_ce_deinit(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct hif_ce_pipe_info *pipe_info;
+	int pipe_num;
+
+	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+		pipe_info = &ar_pci->pipe_info[pipe_num];
+		if (pipe_info->ce_hdl) {
+			ath10k_ce_deinit(pipe_info->ce_hdl);
+			pipe_info->ce_hdl = NULL;
+			pipe_info->buf_sz = 0;
+		}
+	}
+}
+
+static void ath10k_pci_hif_stop(struct ath10k *ar)
+{
+	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+	ath10k_pci_stop_ce(ar);
+
+	/* At this point, asynchronous threads are stopped, the target should
+	 * not DMA nor interrupt. We process the leftovers and then free
+	 * everything else up.
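+	 * The teardown below runs in that order: drain the completion
+	 * list (ath10k_pci_process_ce), free the completion structures
+	 * (ath10k_pci_cleanup_ce), reclaim posted rx/tx buffers
+	 * (ath10k_pci_buffer_cleanup) and finally shut the copy engines
+	 * down (ath10k_pci_ce_deinit).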
*/ + + ath10k_pci_process_ce(ar); + ath10k_pci_cleanup_ce(ar); + ath10k_pci_buffer_cleanup(ar); + ath10k_pci_ce_deinit(ar); +} + +static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, + void *req, u32 req_len, + void *resp, u32 *resp_len) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl; + struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl; + dma_addr_t req_paddr = 0; + dma_addr_t resp_paddr = 0; + struct bmi_xfer xfer = {}; + void *treq, *tresp = NULL; + int ret = 0; + + if (resp && !resp_len) + return -EINVAL; + + if (resp && resp_len && *resp_len == 0) + return -EINVAL; + + treq = kmemdup(req, req_len, GFP_KERNEL); + if (!treq) + return -ENOMEM; + + req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); + ret = dma_mapping_error(ar->dev, req_paddr); + if (ret) + goto err_dma; + + if (resp && resp_len) { + tresp = kzalloc(*resp_len, GFP_KERNEL); + if (!tresp) { + ret = -ENOMEM; + goto err_req; + } + + resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, + DMA_FROM_DEVICE); + ret = dma_mapping_error(ar->dev, resp_paddr); + if (ret) + goto err_req; + + xfer.wait_for_resp = true; + xfer.resp_len = 0; + + ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr); + } + + init_completion(&xfer.done); + + ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); + if (ret) + goto err_resp; + + ret = wait_for_completion_timeout(&xfer.done, + BMI_COMMUNICATION_TIMEOUT_HZ); + if (ret <= 0) { + u32 unused_buffer; + unsigned int unused_nbytes; + unsigned int unused_id; + + ret = -ETIMEDOUT; + ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, + &unused_nbytes, &unused_id); + } else { + /* non-zero means we did not time out */ + ret = 0; + } + +err_resp: + if (resp) { + u32 unused_buffer; + + ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); + dma_unmap_single(ar->dev, resp_paddr, + *resp_len, DMA_FROM_DEVICE); + } +err_req: + dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); + + if (ret == 0 && resp_len) { + *resp_len = min(*resp_len, xfer.resp_len); + memcpy(resp, tresp, xfer.resp_len); + } +err_dma: + kfree(treq); + kfree(tresp); + + return ret; +} + +static void ath10k_pci_bmi_send_done(struct ce_state *ce_state, + void *transfer_context, + u32 data, + unsigned int nbytes, + unsigned int transfer_id) +{ + struct bmi_xfer *xfer = transfer_context; + + if (xfer->wait_for_resp) + return; + + complete(&xfer->done); +} + +static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state, + void *transfer_context, + u32 data, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags) +{ + struct bmi_xfer *xfer = transfer_context; + + if (!xfer->wait_for_resp) { + ath10k_warn("unexpected: BMI data received; ignoring\n"); + return; + } + + xfer->resp_len = nbytes; + complete(&xfer->done); +} + +/* + * Map from service/endpoint to Copy Engine. + * This table is derived from the CE_PCI TABLE, above. + * It is passed to the Target at startup for use by firmware. 
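+ * Each entry is a {service_id, pipedir, pipenum} triple (struct
+ * service_to_pipe) and the table is terminated by an all-zero entry.
+ * For example, the WMI control service maps to CE 3 in the host->target
+ * direction (PIPEDIR_OUT) and CE 2 in the target->host direction
+ * (PIPEDIR_IN).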
+ */ +static const struct service_to_pipe target_service_to_ce_map_wlan[] = { + { + ATH10K_HTC_SVC_ID_WMI_DATA_VO, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + ATH10K_HTC_SVC_ID_WMI_DATA_VO, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + ATH10K_HTC_SVC_ID_WMI_DATA_BK, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + ATH10K_HTC_SVC_ID_WMI_DATA_BK, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + ATH10K_HTC_SVC_ID_WMI_DATA_BE, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + ATH10K_HTC_SVC_ID_WMI_DATA_BE, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + ATH10K_HTC_SVC_ID_WMI_DATA_VI, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + ATH10K_HTC_SVC_ID_WMI_DATA_VI, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + ATH10K_HTC_SVC_ID_WMI_CONTROL, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + ATH10K_HTC_SVC_ID_WMI_CONTROL, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + ATH10K_HTC_SVC_ID_RSVD_CTRL, + PIPEDIR_OUT, /* out = UL = host -> target */ + 0, /* could be moved to 3 (share with WMI) */ + }, + { + ATH10K_HTC_SVC_ID_RSVD_CTRL, + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, + { + ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ + PIPEDIR_OUT, /* out = UL = host -> target */ + 0, + }, + { + ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, + { + ATH10K_HTC_SVC_ID_HTT_DATA_MSG, + PIPEDIR_OUT, /* out = UL = host -> target */ + 4, + }, + { + ATH10K_HTC_SVC_ID_HTT_DATA_MSG, + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, + + /* (Additions here) */ + + { /* Must be last */ + 0, + 0, + 0, + }, +}; + +/* + * Send an interrupt to the device to wake up the Target CPU + * so it has an opportunity to notice any changed state. 
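+ * The "interrupt" is a read-modify-write of the target's CORE_CTRL
+ * register through the diagnostic window: setting
+ * CORE_CTRL_CPU_INTR_MASK raises the A_INUM_FIRMWARE interrupt on the
+ * target CPU.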
+ */
+static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
+{
+	int ret;
+	u32 core_ctrl;
+
+	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
+					      CORE_CTRL_ADDRESS,
+					  &core_ctrl);
+	if (ret) {
+		ath10k_warn("Unable to read core ctrl\n");
+		return ret;
+	}
+
+	/* A_INUM_FIRMWARE interrupt to Target CPU */
+	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
+
+	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
+					       CORE_CTRL_ADDRESS,
+					   core_ctrl);
+	if (ret)
+		ath10k_warn("Unable to set interrupt mask\n");
+
+	return ret;
+}
+
+static int ath10k_pci_init_config(struct ath10k *ar)
+{
+	u32 interconnect_targ_addr;
+	u32 pcie_state_targ_addr = 0;
+	u32 pipe_cfg_targ_addr = 0;
+	u32 svc_to_pipe_map = 0;
+	u32 pcie_config_flags = 0;
+	u32 ealloc_value;
+	u32 ealloc_targ_addr;
+	u32 flag2_value;
+	u32 flag2_targ_addr;
+	int ret = 0;
+
+	/* Download to Target the CE Config and the service-to-CE map */
+	interconnect_targ_addr =
+		host_interest_item_address(HI_ITEM(hi_interconnect_state));
+
+	/* Supply Target-side CE configuration */
+	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
+					  &pcie_state_targ_addr);
+	if (ret != 0) {
+		ath10k_err("Failed to get pcie state addr: %d\n", ret);
+		return ret;
+	}
+
+	if (pcie_state_targ_addr == 0) {
+		ret = -EIO;
+		ath10k_err("Invalid pcie state addr\n");
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+					  offsetof(struct pcie_state,
+						   pipe_cfg_addr),
+					  &pipe_cfg_targ_addr);
+	if (ret != 0) {
+		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
+		return ret;
+	}
+
+	if (pipe_cfg_targ_addr == 0) {
+		ret = -EIO;
+		ath10k_err("Invalid pipe cfg addr\n");
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
+					target_ce_config_wlan,
+					sizeof(target_ce_config_wlan));
+
+	if (ret != 0) {
+		ath10k_err("Failed to write pipe cfg: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+					  offsetof(struct pcie_state,
+						   svc_to_pipe_map),
+					  &svc_to_pipe_map);
+	if (ret != 0) {
+		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
+		return ret;
+	}
+
+	if (svc_to_pipe_map == 0) {
+		ret = -EIO;
+		ath10k_err("Invalid svc_to_pipe map\n");
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
+					target_service_to_ce_map_wlan,
+					sizeof(target_service_to_ce_map_wlan));
+	if (ret != 0) {
+		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+					  offsetof(struct pcie_state,
+						   config_flags),
+					  &pcie_config_flags);
+	if (ret != 0) {
+		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
+		return ret;
+	}
+
+	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
+
+	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
+					offsetof(struct pcie_state, config_flags),
+					&pcie_config_flags,
+					sizeof(pcie_config_flags));
+	if (ret != 0) {
+		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
+		return ret;
+	}
+
+	/* configure early allocation */
+	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
+
+	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
+	if (ret != 0) {
+		ath10k_err("Failed to get early alloc val: %d\n", ret);
+		return ret;
+	}
+
+	/* first bank is switched to IRAM */
+	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
+			 HI_EARLY_ALLOC_MAGIC_MASK);
+	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
+			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
+
+	ret =
ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value); + if (ret != 0) { + ath10k_err("Failed to set early alloc val: %d\n", ret); + return ret; + } + + /* Tell Target to proceed with initialization */ + flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); + + ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value); + if (ret != 0) { + ath10k_err("Failed to get option val: %d\n", ret); + return ret; + } + + flag2_value |= HI_OPTION_EARLY_CFG_DONE; + + ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value); + if (ret != 0) { + ath10k_err("Failed to set option val: %d\n", ret); + return ret; + } + + return 0; +} + + + +static int ath10k_pci_ce_init(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct hif_ce_pipe_info *pipe_info; + const struct ce_attr *attr; + int pipe_num; + + for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { + pipe_info = &ar_pci->pipe_info[pipe_num]; + pipe_info->pipe_num = pipe_num; + pipe_info->hif_ce_state = ar; + attr = &host_ce_config_wlan[pipe_num]; + + pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); + if (pipe_info->ce_hdl == NULL) { + ath10k_err("Unable to initialize CE for pipe: %d\n", + pipe_num); + + /* It is safe to call it here. It checks if ce_hdl is + * valid for each pipe */ + ath10k_pci_ce_deinit(ar); + return -1; + } + + if (pipe_num == ar_pci->ce_count - 1) { + /* + * Reserve the ultimate CE for + * diagnostic Window support + */ + ar_pci->ce_diag = + ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl; + continue; + } + + pipe_info->buf_sz = (size_t) (attr->src_sz_max); + } + + /* + * Initially, establish CE completion handlers for use with BMI. + * These are overwritten with generic handlers after we exit BMI phase. + */ + pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; + ath10k_ce_send_cb_register(pipe_info->ce_hdl, + ath10k_pci_bmi_send_done, 0); + + pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; + ath10k_ce_recv_cb_register(pipe_info->ce_hdl, + ath10k_pci_bmi_recv_data); + + return 0; +} + +static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + u32 fw_indicator_address, fw_indicator; + + ath10k_pci_wake(ar); + + fw_indicator_address = ar_pci->fw_indicator_address; + fw_indicator = ath10k_pci_read32(ar, fw_indicator_address); + + if (fw_indicator & FW_IND_EVENT_PENDING) { + /* ACK: clear Target-side pending event */ + ath10k_pci_write32(ar, fw_indicator_address, + fw_indicator & ~FW_IND_EVENT_PENDING); + + if (ar_pci->started) { + ath10k_pci_hif_dump_area(ar); + } else { + /* + * Probable Target failure before we're prepared + * to handle it. Generally unexpected. 
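+			 * All we do here is acknowledge and log it;
+			 * ath10k_pci_hif_dump_area() is only called once
+			 * ar_pci->started is set.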
+			 */
+			ath10k_warn("early firmware event indicated\n");
+		}
+	}
+
+	ath10k_pci_sleep(ar);
+}
+
+static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+	.send_head		= ath10k_pci_hif_send_head,
+	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
+	.start			= ath10k_pci_hif_start,
+	.stop			= ath10k_pci_hif_stop,
+	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
+	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
+	.send_complete_check	= ath10k_pci_hif_send_complete_check,
+	.init			= ath10k_pci_hif_post_init,
+	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
+};
+
+static void ath10k_pci_ce_tasklet(unsigned long ptr)
+{
+	struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+	struct ath10k_pci *ar_pci = pipe->ar_pci;
+
+	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
+}
+
+static void ath10k_msi_err_tasklet(unsigned long data)
+{
+	struct ath10k *ar = (struct ath10k *)data;
+
+	ath10k_pci_fw_interrupt_handler(ar);
+}
+
+/*
+ * Handler for a per-engine interrupt on a PARTICULAR CE.
+ * This is used in cases where each CE has a private MSI interrupt.
+ */
+static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
+{
+	struct ath10k *ar = arg;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
+
+	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
+		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * NOTE: We are able to derive ce_id from irq because we
+	 * use a one-to-one mapping for CE's 0..5.
+	 * CE's 6 & 7 do not use interrupts at all.
+	 *
+	 * This mapping must be kept in sync with the mapping
+	 * used by firmware.
+	 */
+	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
+{
+	struct ath10k *ar = arg;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	tasklet_schedule(&ar_pci->msi_fw_err);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Top-level interrupt handler for all PCI interrupts from a Target.
+ * When a block of MSI interrupts is allocated, this top-level handler
+ * is not used; instead, we directly call the correct sub-handler.
+ */
+static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
+{
+	struct ath10k *ar = arg;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	if (ar_pci->num_msi_intrs == 0) {
+		/*
+		 * IMPORTANT: the INTR_CLR register has to be set after
+		 * INTR_ENABLE is set to 0, otherwise the interrupt
+		 * cannot really be cleared.
+		 */
+		iowrite32(0, ar_pci->mem +
+			  (SOC_CORE_BASE_ADDRESS |
+			   PCIE_INTR_ENABLE_ADDRESS));
+		iowrite32(PCIE_INTR_FIRMWARE_MASK |
+			  PCIE_INTR_CE_MASK_ALL,
+			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+					 PCIE_INTR_CLR_ADDRESS));
+		/*
+		 * IMPORTANT: this extra read transaction is required to
+		 * flush the posted write buffer.
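+		 * The dummy ioread32() below forces the two iowrite32()s
+		 * above out of the bridge's posted-write buffer before the
+		 * handler returns; the value read back is discarded.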
+		 */
+		(void) ioread32(ar_pci->mem +
+				(SOC_CORE_BASE_ADDRESS |
+				 PCIE_INTR_ENABLE_ADDRESS));
+	}
+
+	tasklet_schedule(&ar_pci->intr_tq);
+
+	return IRQ_HANDLED;
+}
+
+static void ath10k_pci_tasklet(unsigned long data)
+{
+	struct ath10k *ar = (struct ath10k *)data;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
+	ath10k_ce_per_engine_service_any(ar);
+
+	if (ar_pci->num_msi_intrs == 0) {
+		/* Enable Legacy PCI line interrupts */
+		iowrite32(PCIE_INTR_FIRMWARE_MASK |
+			  PCIE_INTR_CE_MASK_ALL,
+			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+					 PCIE_INTR_ENABLE_ADDRESS));
+		/*
+		 * IMPORTANT: this extra read transaction is required to
+		 * flush the posted write buffer
+		 */
+		(void) ioread32(ar_pci->mem +
+				(SOC_CORE_BASE_ADDRESS |
+				 PCIE_INTR_ENABLE_ADDRESS));
+	}
+}
+
+static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+	int i;
+
+	ret = pci_enable_msi_block(ar_pci->pdev, num);
+	if (ret)
+		return ret;
+
+	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
+			  ath10k_pci_msi_fw_handler,
+			  IRQF_SHARED, "ath10k_pci", ar);
+	if (ret)
+		return ret;
+
+	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
+		ret = request_irq(ar_pci->pdev->irq + i,
+				  ath10k_pci_per_engine_handler,
+				  IRQF_SHARED, "ath10k_pci", ar);
+		if (ret) {
+			ath10k_warn("request_irq(%d) failed %d\n",
+				    ar_pci->pdev->irq + i, ret);
+
+			for (; i >= MSI_ASSIGN_CE_INITIAL; i--)
+				free_irq(ar_pci->pdev->irq + i, ar);
+
+			pci_disable_msi(ar_pci->pdev);
+			return ret;
+		}
+	}
+
+	ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
+	return 0;
+}
+
+static int ath10k_pci_start_intr_msi(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	ret = pci_enable_msi(ar_pci->pdev);
+	if (ret < 0)
+		return ret;
+
+	ret = request_irq(ar_pci->pdev->irq,
+			  ath10k_pci_interrupt_handler,
+			  IRQF_SHARED, "ath10k_pci", ar);
+	if (ret < 0) {
+		pci_disable_msi(ar_pci->pdev);
+		return ret;
+	}
+
+	ath10k_info("MSI interrupt handling\n");
+	return 0;
+}
+
+static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	ret = request_irq(ar_pci->pdev->irq,
+			  ath10k_pci_interrupt_handler,
+			  IRQF_SHARED, "ath10k_pci", ar);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Make sure to wake the Target before enabling Legacy
+	 * Interrupt.
+	 */
+	iowrite32(PCIE_SOC_WAKE_V_MASK,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+
+	ath10k_pci_wait(ar);
+
+	/*
+	 * A potential race occurs here: The CORE_BASE write
+	 * depends on target correctly decoding AXI address but
+	 * host won't know when target writes BAR to CORE_CTRL.
+	 * This write might get lost if target has NOT written BAR.
+	 * For now, fix the race by repeating the write in below
+	 * synchronization checking.
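+	 * (ath10k_pci_reset_target() repeats this same PCIE_INTR_ENABLE
+	 * write in its wait loop for the same reason.)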
+	 */
+	iowrite32(PCIE_INTR_FIRMWARE_MASK |
+		  PCIE_INTR_CE_MASK_ALL,
+		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+				 PCIE_INTR_ENABLE_ADDRESS));
+	iowrite32(PCIE_SOC_WAKE_RESET,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+
+	ath10k_info("legacy interrupt handling\n");
+	return 0;
+}
+
+static int ath10k_pci_start_intr(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int num = MSI_NUM_REQUEST;
+	int ret;
+	int i;
+
+	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
+	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
+		     (unsigned long) ar);
+
+	for (i = 0; i < CE_COUNT; i++) {
+		ar_pci->pipe_info[i].ar_pci = ar_pci;
+		tasklet_init(&ar_pci->pipe_info[i].intr,
+			     ath10k_pci_ce_tasklet,
+			     (unsigned long)&ar_pci->pipe_info[i]);
+	}
+
+	if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
+		num = 1;
+
+	if (num > 1) {
+		ret = ath10k_pci_start_intr_msix(ar, num);
+		if (ret == 0)
+			goto exit;
+
+		ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
+		num = 1;
+	}
+
+	if (num == 1) {
+		ret = ath10k_pci_start_intr_msi(ar);
+		if (ret == 0)
+			goto exit;
+
+		ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
+			    ret);
+		num = 0;
+	}
+
+	ret = ath10k_pci_start_intr_legacy(ar);
+
+exit:
+	ar_pci->num_msi_intrs = num;
+	ar_pci->ce_count = CE_COUNT;
+	return ret;
+}
+
+static void ath10k_pci_stop_intr(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int i;
+
+	/* There's at least one interrupt regardless of whether it's legacy
+	 * INTR or MSI or MSI-X */
+	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+		free_irq(ar_pci->pdev->irq + i, ar);
+
+	if (ar_pci->num_msi_intrs > 0)
+		pci_disable_msi(ar_pci->pdev);
+}
+
+static int ath10k_pci_reset_target(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int wait_limit = 300; /* 3 sec */
+
+	/* Wait for Target to finish initialization before we proceed. */
+	iowrite32(PCIE_SOC_WAKE_V_MASK,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+
+	ath10k_pci_wait(ar);
+
+	while (wait_limit-- &&
+	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
+		 FW_IND_INITIALIZED)) {
+		if (ar_pci->num_msi_intrs == 0)
+			/* Fix potential race by repeating CORE_BASE writes */
+			iowrite32(PCIE_INTR_FIRMWARE_MASK |
+				  PCIE_INTR_CE_MASK_ALL,
+				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+						 PCIE_INTR_ENABLE_ADDRESS));
+		mdelay(10);
+	}
+
+	if (wait_limit < 0) {
+		ath10k_err("Target stalled\n");
+		iowrite32(PCIE_SOC_WAKE_RESET,
+			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+			  PCIE_SOC_WAKE_ADDRESS);
+		return -EIO;
+	}
+
+	iowrite32(PCIE_SOC_WAKE_RESET,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+
+	return 0;
+}
+
+static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
+{
+	struct ath10k *ar = ar_pci->ar;
+	void __iomem *mem = ar_pci->mem;
+	int i;
+	u32 val;
+
+	if (!SOC_GLOBAL_RESET_ADDRESS)
+		return;
+
+	if (!mem)
+		return;
+
+	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+			       PCIE_SOC_WAKE_V_MASK);
+	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+		if (ath10k_pci_target_is_awake(ar))
+			break;
+		msleep(1);
+	}
+
+	/* Put Target, including PCIe, into RESET. */
+	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+	val |= 1;
+	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+
+	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+					  RTC_STATE_COLD_RESET_MASK)
+			break;
+		msleep(1);
+	}
+
+	/* Pull Target, including PCIe, out of RESET.
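+	 * This mirrors the reset entry above: clear bit 0 of
+	 * SOC_GLOBAL_RESET, then poll RTC_STATE until the
+	 * RTC_STATE_COLD_RESET_MASK bit clears, for at most
+	 * ATH_PCI_RESET_WAIT_MAX iterations.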
+	 */
+	val &= ~1;
+	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+
+	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+					    RTC_STATE_COLD_RESET_MASK))
+			break;
+		msleep(1);
+	}
+
+	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+}
+
+static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
+{
+	int i;
+
+	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
+		if (!test_bit(i, ar_pci->features))
+			continue;
+
+		switch (i) {
+		case ATH10K_PCI_FEATURE_MSI_X:
+			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
+			break;
+		case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
+			ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+			break;
+		}
+	}
+}
+
+static int ath10k_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *pci_dev)
+{
+	void __iomem *mem;
+	int ret = 0;
+	struct ath10k *ar;
+	struct ath10k_pci *ar_pci;
+	u32 lcr_val;
+
+	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
+	if (ar_pci == NULL)
+		return -ENOMEM;
+
+	ar_pci->pdev = pdev;
+	ar_pci->dev = &pdev->dev;
+
+	switch (pci_dev->device) {
+	case QCA988X_1_0_DEVICE_ID:
+		set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
+		break;
+	case QCA988X_2_0_DEVICE_ID:
+		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
+		break;
+	default:
+		ret = -ENODEV;
+		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
+		goto err_ar_pci;
+	}
+
+	ath10k_pci_dump_features(ar_pci);
+
+	ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
+				&ath10k_pci_hif_ops);
+	if (!ar) {
+		ath10k_err("ath10k_core_create failed!\n");
+		ret = -EINVAL;
+		goto err_ar_pci;
+	}
+
+	/* Enable QCA988X_1.0 HW workarounds */
+	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
+		spin_lock_init(&ar_pci->hw_v1_workaround_lock);
+
+	ar_pci->ar = ar;
+	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
+	atomic_set(&ar_pci->keep_awake_count, 0);
+
+	pci_set_drvdata(pdev, ar);
+
+	/*
+	 * Without any knowledge of the Host, the Target may have been reset or
+	 * power cycled and its Config Space may no longer reflect the PCI
+	 * address space that was assigned earlier by the PCI infrastructure.
+	 * Refresh it now.
+	 */
+	ret = pci_assign_resource(pdev, BAR_NUM);
+	if (ret) {
+		ath10k_err("cannot assign PCI space: %d\n", ret);
+		goto err_ar;
+	}
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		ath10k_err("cannot enable PCI device: %d\n", ret);
+		goto err_ar;
+	}
+
+	/* Request MMIO resources */
+	ret = pci_request_region(pdev, BAR_NUM, "ath");
+	if (ret) {
+		ath10k_err("PCI MMIO reservation error: %d\n", ret);
+		goto err_device;
+	}
+
+	/*
+	 * Target structures have a limit of 32 bit DMA pointers.
+	 * DMA pointers can be wider than 32 bits by default on some systems.
+	 */
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret) {
+		ath10k_err("32-bit DMA not available: %d\n", ret);
+		goto err_region;
+	}
+
+	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret) {
+		ath10k_err("cannot enable 32-bit consistent DMA\n");
+		goto err_region;
+	}
+
+	/* Set bus master bit in PCI_COMMAND to enable DMA */
+	pci_set_master(pdev);
+
+	/*
+	 * Temporary FIX: disable ASPM
+	 * Will be removed after the OTP is programmed
+	 */
+	pci_read_config_dword(pdev, 0x80, &lcr_val);
+	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
+
+	/* Arrange for access to Target SoC registers.
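+	 * All target register accesses go through a single memory BAR
+	 * (BAR_NUM, i.e. BAR 0); passing 0 as the maxlen argument to
+	 * pci_iomap() below maps the whole BAR.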
+	 */
+	mem = pci_iomap(pdev, BAR_NUM, 0);
+	if (!mem) {
+		ath10k_err("PCI iomap error\n");
+		ret = -EIO;
+		goto err_master;
+	}
+
+	ar_pci->mem = mem;
+
+	spin_lock_init(&ar_pci->ce_lock);
+
+	ar_pci->cacheline_sz = dma_get_cache_alignment();
+
+	ret = ath10k_pci_start_intr(ar);
+	if (ret) {
+		ath10k_err("could not start interrupt handling (%d)\n", ret);
+		goto err_iomap;
+	}
+
+	/*
+	 * Bring the target up cleanly.
+	 *
+	 * The target may be in an undefined state with an AUX-powered Target
+	 * and a Host in WoW mode. If the Host crashes, loses power, or is
+	 * restarted (without unloading the driver) then the Target is left
+	 * (aux) powered and running. On a subsequent driver load, the Target
+	 * is in an unexpected state. We try to catch that here in order to
+	 * reset the Target and retry the probe.
+	 */
+	ath10k_pci_device_reset(ar_pci);
+
+	ret = ath10k_pci_reset_target(ar);
+	if (ret)
+		goto err_intr;
+
+	if (ath10k_target_ps) {
+		ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
+	} else {
+		/* Force AWAKE forever */
+		ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
+		ath10k_do_pci_wake(ar);
+	}
+
+	ret = ath10k_pci_ce_init(ar);
+	if (ret)
+		goto err_intr;
+
+	ret = ath10k_pci_init_config(ar);
+	if (ret)
+		goto err_ce;
+
+	ret = ath10k_pci_wake_target_cpu(ar);
+	if (ret) {
+		ath10k_err("could not wake up target CPU (%d)\n", ret);
+		goto err_ce;
+	}
+
+	ret = ath10k_core_register(ar);
+	if (ret) {
+		ath10k_err("could not register driver core (%d)\n", ret);
+		goto err_ce;
+	}
+
+	return 0;
+
+err_ce:
+	ath10k_pci_ce_deinit(ar);
+err_intr:
+	ath10k_pci_stop_intr(ar);
+err_iomap:
+	pci_iounmap(pdev, mem);
+err_master:
+	pci_clear_master(pdev);
+err_region:
+	pci_release_region(pdev, BAR_NUM);
+err_device:
+	pci_disable_device(pdev);
+err_ar:
+	pci_set_drvdata(pdev, NULL);
+	ath10k_core_destroy(ar);
+err_ar_pci:
+	/* call HIF PCI free here */
+	kfree(ar_pci);
+
+	return ret;
+}
+
+static void ath10k_pci_remove(struct pci_dev *pdev)
+{
+	struct ath10k *ar = pci_get_drvdata(pdev);
+	struct ath10k_pci *ar_pci;
+
+	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+	if (!ar)
+		return;
+
+	ar_pci = ath10k_pci_priv(ar);
+
+	if (!ar_pci)
+		return;
+
+	tasklet_kill(&ar_pci->msi_fw_err);
+
+	ath10k_core_unregister(ar);
+	ath10k_pci_stop_intr(ar);
+
+	pci_set_drvdata(pdev, NULL);
+	pci_iounmap(pdev, ar_pci->mem);
+	pci_release_region(pdev, BAR_NUM);
+	pci_clear_master(pdev);
+	pci_disable_device(pdev);
+
+	ath10k_core_destroy(ar);
+	kfree(ar_pci);
+}
+
+#if defined(CONFIG_PM_SLEEP)
+
+#define ATH10K_PCI_PM_CONTROL 0x44
+
+static int ath10k_pci_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct ath10k *ar = pci_get_drvdata(pdev);
+	struct ath10k_pci *ar_pci;
+	u32 val;
+	int ret, retval;
+
+	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+	if (!ar)
+		return -ENODEV;
+
+	ar_pci = ath10k_pci_priv(ar);
+	if (!ar_pci)
+		return -ENODEV;
+
+	if (ath10k_core_target_suspend(ar))
+		return -EBUSY;
+
+	ret = wait_event_interruptible_timeout(ar->event_queue,
+					       ar->is_target_paused == true,
+					       1 * HZ);
+	if (ret < 0) {
+		ath10k_warn("suspend interrupted (%d)\n", ret);
+		retval = ret;
+		goto resume;
+	} else if (ret == 0) {
+		ath10k_warn("suspend timed out - target pause event never came\n");
+		retval = -EIO;
+		goto resume;
+	}
+
+	/*
+	 * Reset is_target_paused so the next suspend waits for the pause
+	 * event again. If it stayed true, the wait above would return
+	 * immediately and the host could suspend while the target is still
+	 * active, causing a target assert.
+	 */
+	ar->is_target_paused =
false; + + pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); + + if ((val & 0x000000ff) != 0x3) { + pci_save_state(pdev); + pci_disable_device(pdev); + pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, + (val & 0xffffff00) | 0x03); + } + + return 0; +resume: + ret = ath10k_core_target_resume(ar); + if (ret) + ath10k_warn("could not resume (%d)\n", ret); + + return retval; +} + +static int ath10k_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct ath10k *ar = pci_get_drvdata(pdev); + struct ath10k_pci *ar_pci; + int ret; + u32 val; + + ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); + + if (!ar) + return -ENODEV; + ar_pci = ath10k_pci_priv(ar); + + if (!ar_pci) + return -ENODEV; + + ret = pci_enable_device(pdev); + if (ret) { + ath10k_warn("cannot enable PCI device: %d\n", ret); + return ret; + } + + pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); + + if ((val & 0x000000ff) != 0) { + pci_restore_state(pdev); + pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, + val & 0xffffff00); + /* + * Suspend/Resume resets the PCI configuration space, + * so we have to re-disable the RETRY_TIMEOUT register (0x41) + * to keep PCI Tx retries from interfering with C3 CPU state + */ + pci_read_config_dword(pdev, 0x40, &val); + + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + } + + ret = ath10k_core_target_resume(ar); + if (ret) + ath10k_warn("target resume failed: %d\n", ret); + + return ret; +} + +static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops, + ath10k_pci_suspend, + ath10k_pci_resume); + +#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops) + +#else + +#define ATH10K_PCI_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + +MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); + +static struct pci_driver ath10k_pci_driver = { + .name = "ath10k_pci", + .id_table = ath10k_pci_id_table, + .probe = ath10k_pci_probe, + .remove = ath10k_pci_remove, + .driver.pm = ATH10K_PCI_PM_OPS, +}; + +static int __init ath10k_pci_init(void) +{ + int ret; + + ret = pci_register_driver(&ath10k_pci_driver); + if (ret) + ath10k_err("pci_register_driver failed [%d]\n", ret); + + return ret; +} +module_init(ath10k_pci_init); + +static void __exit ath10k_pci_exit(void) +{ + pci_unregister_driver(&ath10k_pci_driver); +} + +module_exit(ath10k_pci_exit); + +MODULE_AUTHOR("Qualcomm Atheros"); +MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE); +MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE); +MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h new file mode 100644 index 000000000000..d2a055a07dc6 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _PCI_H_
+#define _PCI_H_
+
+#include <linux/interrupt.h>
+
+#include "hw.h"
+#include "ce.h"
+
+/* FW dump area */
+#define REG_DUMP_COUNT_QCA988X 60
+
+/*
+ * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
+ */
+#define DIAG_TRANSFER_LIMIT 2048
+
+struct bmi_xfer {
+	struct completion done;
+	bool wait_for_resp;
+	u32 resp_len;
+};
+
+struct ath10k_pci_compl {
+	struct list_head list;
+	int send_or_recv;
+	struct ce_state *ce_state;
+	struct hif_ce_pipe_info *pipe_info;
+	void *transfer_context;
+	unsigned int nbytes;
+	unsigned int transfer_id;
+	unsigned int flags;
+};
+
+/* compl_state.send_or_recv */
+#define HIF_CE_COMPLETE_FREE 0
+#define HIF_CE_COMPLETE_SEND 1
+#define HIF_CE_COMPLETE_RECV 2
+
+/*
+ * PCI-specific Target state
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ *
+ * Much of this may be of interest to the Host so
+ * HOST_INTEREST->hi_interconnect_state points here
+ * (and all members are 32-bit quantities in order to
+ * facilitate Host access). In particular, Host software is
+ * required to initialize pipe_cfg_addr and svc_to_pipe_map.
+ */
+struct pcie_state {
+	/* Pipe configuration Target address */
+	/* NB: ce_pipe_config[CE_COUNT] */
+	u32 pipe_cfg_addr;
+
+	/* Service to pipe map Target address */
+	/* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
+	u32 svc_to_pipe_map;
+
+	/* number of MSI interrupts requested */
+	u32 msi_requested;
+
+	/* number of MSI interrupts granted */
+	u32 msi_granted;
+
+	/* Message Signalled Interrupt address */
+	u32 msi_addr;
+
+	/* Base data */
+	u32 msi_data;
+
+	/*
+	 * Data for firmware interrupt;
+	 * MSI data for other interrupts are
+	 * in various SoC registers
+	 */
+	u32 msi_fw_intr_data;
+
+	/* PCIE_PWR_METHOD_* */
+	u32 power_mgmt_method;
+
+	/* PCIE_CONFIG_FLAG_* */
+	u32 config_flags;
+};
+
+/* PCIE_CONFIG_FLAG definitions */
+#define PCIE_CONFIG_FLAG_ENABLE_L1  0x0000001
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+	u32 pipenum;
+	u32 pipedir;
+	u32 nentries;
+	u32 nbytes_max;
+	u32 flags;
+	u32 reserved;
+};
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation.
+ * This is somewhat awkward for the "middle-man" Target since things
+ * that are "PIPEDIR_OUT" are coming IN to the Target over the
+ * interconnect.
+ */
+#define PIPEDIR_NONE    0
+#define PIPEDIR_IN      1  /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT     2  /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT   3  /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct service_to_pipe {
+	u32 service_id;
+	u32 pipedir;
+	u32 pipenum;
+};
+
+enum ath10k_pci_features {
+	ATH10K_PCI_FEATURE_MSI_X = 0,
+	ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1,
+
+	/* keep last */
+	ATH10K_PCI_FEATURE_COUNT
+};
+
+/* Per-pipe state. */
+struct hif_ce_pipe_info {
+	/* Handle of underlying Copy Engine */
+	struct ce_state *ce_hdl;
+
+	/* Our pipe number; facilitates use of pipe_info ptrs. */
+	u8 pipe_num;
+
+	/* Convenience back pointer to hif_ce_state. */
+	struct ath10k *hif_ce_state;
+
+	size_t buf_sz;
+
+	/* protects compl_free and num_send_allowed */
+	spinlock_t pipe_lock;
+
+	/* List of free CE completion slots */
+	struct list_head compl_free;
+
+	/* Limit the number of outstanding send requests. */
+	int num_sends_allowed;
+
+	struct ath10k_pci *ar_pci;
+	struct tasklet_struct intr;
+};
+
+struct ath10k_pci {
+	struct pci_dev *pdev;
+	struct device *dev;
+	struct ath10k *ar;
+	void __iomem *mem;
+	int cacheline_sz;
+
+	DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
+
+	/*
+	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
+	 * interrupts.
+	 */
+	int num_msi_intrs;
+
+	struct tasklet_struct intr_tq;
+	struct tasklet_struct msi_fw_err;
+
+	/* Number of Copy Engines supported */
+	unsigned int ce_count;
+
+	int started;
+
+	atomic_t keep_awake_count;
+	bool verified_awake;
+
+	/* List of CE completions to be processed */
+	struct list_head compl_process;
+
+	/* protects compl_processing and compl_process */
+	spinlock_t compl_lock;
+
+	bool compl_processing;
+
+	struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
+
+	struct ath10k_hif_cb msg_callbacks_current;
+
+	/* Target address used to signal a pending firmware event */
+	u32 fw_indicator_address;
+
+	/* Copy Engine used for Diagnostic Accesses */
+	struct ce_state *ce_diag;
+
+	/* FIXME: document what this really protects */
+	spinlock_t ce_lock;
+
+	/* Map CE id to ce_state */
+	struct ce_state *ce_id_to_state[CE_COUNT_MAX];
+
+	/* makes sure that dummy reads are atomic */
+	spinlock_t hw_v1_workaround_lock;
+};
+
+static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
+{
+	return ar->hif.priv;
+}
+
+static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
+{
+	return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
+{
+	iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
+#define PCIE_WAKE_TIMEOUT 5000	/* 5ms */
+
+#define BAR_NUM 0
+
+#define CDC_WAR_MAGIC_STR   0xceef0000
+#define CDC_WAR_DATA_CE     4
+
+/*
+ * TODO: Should be a function call specific to each Target-type.
+ * This convoluted macro converts from Target CPU Virtual Address Space to CE
+ * Address Space. As part of this process, we conservatively fetch the current
+ * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
+ * for this device; but that's not guaranteed.
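+ * Roughly:
+ *
+ *	ce_addr = ((CORE_CTRL & 0x7ff) << 21) | 0x100000 | (addr & 0xfffff)
+ *
+ * so with, say, CORE_CTRL & 0x7ff == 0x020 and addr == 0x0040d234 this
+ * would yield (0x020 << 21) | 0x100000 | 0x0d234 == 0x0410d234 (values
+ * purely illustrative).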
+ */ +#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \ + (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \ + CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \ + 0x100000 | ((addr) & 0xfffff)) + +/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */ +#define DIAG_ACCESS_CE_TIMEOUT_MS 10 + +/* + * This API allows the Host to access Target registers directly + * and relatively efficiently over PCIe. + * This allows the Host to avoid extra overhead associated with + * sending a message to firmware and waiting for a response message + * from firmware, as is done on other interconnects. + * + * Yet there is some complexity with direct accesses because the + * Target's power state is not known a priori. The Host must issue + * special PCIe reads/writes in order to explicitly wake the Target + * and to verify that it is awake and will remain awake. + * + * Usage: + * + * Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space. + * These calls must be bracketed by ath10k_pci_wake and + * ath10k_pci_sleep. A single BEGIN/END pair is adequate for + * multiple READ/WRITE operations. + * + * Use ath10k_pci_wake to put the Target in a state in + * which it is legal for the Host to directly access it. This + * may involve waking the Target from a low power state, which + * may take up to 2Ms! + * + * Use ath10k_pci_sleep to tell the Target that as far as + * this code path is concerned, it no longer needs to remain + * directly accessible. BEGIN/END is under a reference counter; + * multiple code paths may issue BEGIN/END on a single targid. + */ +static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, + u32 value) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + void __iomem *addr = ar_pci->mem; + + if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) { + unsigned long irq_flags; + + spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags); + + ioread32(addr+offset+4); /* 3rd read prior to write */ + ioread32(addr+offset+4); /* 2nd read prior to write */ + ioread32(addr+offset+4); /* 1st read prior to write */ + iowrite32(value, addr+offset); + + spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock, + irq_flags); + } else { + iowrite32(value, addr+offset); + } +} + +static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + return ioread32(ar_pci->mem + offset); +} + +extern unsigned int ath10k_target_ps; + +void ath10k_do_pci_wake(struct ath10k *ar); +void ath10k_do_pci_sleep(struct ath10k *ar); + +static inline void ath10k_pci_wake(struct ath10k *ar) +{ + if (ath10k_target_ps) + ath10k_do_pci_wake(ar); +} + +static inline void ath10k_pci_sleep(struct ath10k *ar) +{ + if (ath10k_target_ps) + ath10k_do_pci_sleep(ar); +} + +#endif /* _PCI_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h new file mode 100644 index 000000000000..bfec6c8f2ecb --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -0,0 +1,990 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _RX_DESC_H_ +#define _RX_DESC_H_ + +enum rx_attention_flags { + RX_ATTENTION_FLAGS_FIRST_MPDU = 1 << 0, + RX_ATTENTION_FLAGS_LAST_MPDU = 1 << 1, + RX_ATTENTION_FLAGS_MCAST_BCAST = 1 << 2, + RX_ATTENTION_FLAGS_PEER_IDX_INVALID = 1 << 3, + RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = 1 << 4, + RX_ATTENTION_FLAGS_POWER_MGMT = 1 << 5, + RX_ATTENTION_FLAGS_NON_QOS = 1 << 6, + RX_ATTENTION_FLAGS_NULL_DATA = 1 << 7, + RX_ATTENTION_FLAGS_MGMT_TYPE = 1 << 8, + RX_ATTENTION_FLAGS_CTRL_TYPE = 1 << 9, + RX_ATTENTION_FLAGS_MORE_DATA = 1 << 10, + RX_ATTENTION_FLAGS_EOSP = 1 << 11, + RX_ATTENTION_FLAGS_U_APSD_TRIGGER = 1 << 12, + RX_ATTENTION_FLAGS_FRAGMENT = 1 << 13, + RX_ATTENTION_FLAGS_ORDER = 1 << 14, + RX_ATTENTION_FLAGS_CLASSIFICATION = 1 << 15, + RX_ATTENTION_FLAGS_OVERFLOW_ERR = 1 << 16, + RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = 1 << 17, + RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18, + RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = 1 << 19, + RX_ATTENTION_FLAGS_SA_IDX_INVALID = 1 << 20, + RX_ATTENTION_FLAGS_DA_IDX_INVALID = 1 << 21, + RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = 1 << 22, + RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = 1 << 23, + RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = 1 << 24, + RX_ATTENTION_FLAGS_DIRECTED = 1 << 25, + RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = 1 << 26, + RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = 1 << 27, + RX_ATTENTION_FLAGS_TKIP_MIC_ERR = 1 << 28, + RX_ATTENTION_FLAGS_DECRYPT_ERR = 1 << 29, + RX_ATTENTION_FLAGS_FCS_ERR = 1 << 30, + RX_ATTENTION_FLAGS_MSDU_DONE = 1 << 31, +}; + +struct rx_attention { + __le32 flags; /* %RX_ATTENTION_FLAGS_ */ +} __packed; + +/* + * first_mpdu + * Indicates the first MSDU of the PPDU. If both first_mpdu + * and last_mpdu are set in the MSDU then this is a not an + * A-MPDU frame but a stand alone MPDU. Interior MPDU in an + * A-MPDU shall have both first_mpdu and last_mpdu bits set to + * 0. The PPDU start status will only be valid when this bit + * is set. + * + * last_mpdu + * Indicates the last MSDU of the last MPDU of the PPDU. The + * PPDU end status will only be valid when this bit is set. + * + * mcast_bcast + * Multicast / broadcast indicator. Only set when the MAC + * address 1 bit 0 is set indicating mcast/bcast and the BSSID + * matches one of the 4 BSSID registers. Only set when + * first_msdu is set. + * + * peer_idx_invalid + * Indicates no matching entries within the the max search + * count. Only set when first_msdu is set. + * + * peer_idx_timeout + * Indicates an unsuccessful search for the peer index due to + * timeout. Only set when first_msdu is set. + * + * power_mgmt + * Power management bit set in the 802.11 header. Only set + * when first_msdu is set. + * + * non_qos + * Set if packet is not a non-QoS data frame. Only set when + * first_msdu is set. + * + * null_data + * Set if frame type indicates either null data or QoS null + * data format. Only set when first_msdu is set. + * + * mgmt_type + * Set if packet is a management packet. Only set when + * first_msdu is set. + * + * ctrl_type + * Set if packet is a control packet. 
Only set when first_msdu + * is set. + * + * more_data + * Set if more bit in frame control is set. Only set when + * first_msdu is set. + * + * eosp + * Set if the EOSP (end of service period) bit in the QoS + * control field is set. Only set when first_msdu is set. + * + * u_apsd_trigger + * Set if packet is U-APSD trigger. Key table will have bits + * per TID to indicate U-APSD trigger. + * + * fragment + * Indicates that this is an 802.11 fragment frame. This is + * set when either the more_frag bit is set in the frame + * control or the fragment number is not zero. Only set when + * first_msdu is set. + * + * order + * Set if the order bit in the frame control is set. Only set + * when first_msdu is set. + * + * classification + * Indicates that this status has a corresponding MSDU that + * requires FW processing. The OLE will have classification + * ring mask registers which will indicate the ring(s) for + * packets and descriptors which need FW attention. + * + * overflow_err + * PCU Receive FIFO does not have enough space to store the + * full receive packet. Enough space is reserved in the + * receive FIFO for the status is written. This MPDU remaining + * packets in the PPDU will be filtered and no Ack response + * will be transmitted. + * + * msdu_length_err + * Indicates that the MSDU length from the 802.3 encapsulated + * length field extends beyond the MPDU boundary. + * + * tcp_udp_chksum_fail + * Indicates that the computed checksum (tcp_udp_chksum) did + * not match the checksum in the TCP/UDP header. + * + * ip_chksum_fail + * Indicates that the computed checksum did not match the + * checksum in the IP header. + * + * sa_idx_invalid + * Indicates no matching entry was found in the address search + * table for the source MAC address. + * + * da_idx_invalid + * Indicates no matching entry was found in the address search + * table for the destination MAC address. + * + * sa_idx_timeout + * Indicates an unsuccessful search for the source MAC address + * due to the expiring of the search timer. + * + * da_idx_timeout + * Indicates an unsuccessful search for the destination MAC + * address due to the expiring of the search timer. + * + * encrypt_required + * Indicates that this data type frame is not encrypted even if + * the policy for this MPDU requires encryption as indicated in + * the peer table key type. + * + * directed + * MPDU is a directed packet which means that the RA matched + * our STA addresses. In proxySTA it means that the TA matched + * an entry in our address search table with the corresponding + * 'no_ack' bit is the address search entry cleared. + * + * buffer_fragment + * Indicates that at least one of the rx buffers has been + * fragmented. If set the FW should look at the rx_frag_info + * descriptor described below. + * + * mpdu_length_err + * Indicates that the MPDU was pre-maturely terminated + * resulting in a truncated MPDU. Don't trust the MPDU length + * field. + * + * tkip_mic_err + * Indicates that the MPDU Michael integrity check failed + * + * decrypt_err + * Indicates that the MPDU decrypt integrity check failed + * + * fcs_err + * Indicates that the MPDU FCS check failed + * + * msdu_done + * If set indicates that the RX packet data, RX header data, RX + * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU + * start/end descriptors and RX Attention descriptor are all + * valid. This bit must be in the last octet of the + * descriptor. 
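+ *
+ * Given that ordering guarantee, host rx code would typically test
+ * something like
+ *
+ *	__le32_to_cpu(attn->flags) & RX_ATTENTION_FLAGS_MSDU_DONE
+ *
+ * before trusting any other field in these descriptors.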
+ */ + +struct rx_frag_info { + u8 ring0_more_count; + u8 ring1_more_count; + u8 ring2_more_count; + u8 ring3_more_count; +} __packed; + +/* + * ring0_more_count + * Indicates the number of more buffers associated with RX DMA + * ring 0. Field is filled in by the RX_DMA. + * + * ring1_more_count + * Indicates the number of more buffers associated with RX DMA + * ring 1. Field is filled in by the RX_DMA. + * + * ring2_more_count + * Indicates the number of more buffers associated with RX DMA + * ring 2. Field is filled in by the RX_DMA. + * + * ring3_more_count + * Indicates the number of more buffers associated with RX DMA + * ring 3. Field is filled in by the RX_DMA. + */ + +enum htt_rx_mpdu_encrypt_type { + HTT_RX_MPDU_ENCRYPT_WEP40 = 0, + HTT_RX_MPDU_ENCRYPT_WEP104 = 1, + HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2, + HTT_RX_MPDU_ENCRYPT_WEP128 = 3, + HTT_RX_MPDU_ENCRYPT_TKIP_WPA = 4, + HTT_RX_MPDU_ENCRYPT_WAPI = 5, + HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6, + HTT_RX_MPDU_ENCRYPT_NONE = 7, +}; + +#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff +#define RX_MPDU_START_INFO0_PEER_IDX_LSB 0 +#define RX_MPDU_START_INFO0_SEQ_NUM_MASK 0x0fff0000 +#define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16 +#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000 +#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28 +#define RX_MPDU_START_INFO0_FROM_DS (1 << 11) +#define RX_MPDU_START_INFO0_TO_DS (1 << 12) +#define RX_MPDU_START_INFO0_ENCRYPTED (1 << 13) +#define RX_MPDU_START_INFO0_RETRY (1 << 14) +#define RX_MPDU_START_INFO0_TXBF_H_INFO (1 << 15) + +#define RX_MPDU_START_INFO1_TID_MASK 0xf0000000 +#define RX_MPDU_START_INFO1_TID_LSB 28 +#define RX_MPDU_START_INFO1_DIRECTED (1 << 16) + +struct rx_mpdu_start { + __le32 info0; + union { + struct { + __le32 pn31_0; + __le32 info1; /* %RX_MPDU_START_INFO1_ */ + } __packed; + struct { + u8 pn[6]; + } __packed; + } __packed; +} __packed; + +/* + * peer_idx + * The index of the address search table which associated with + * the peer table entry corresponding to this MPDU. Only valid + * when first_msdu is set. + * + * fr_ds + * Set if the from DS bit is set in the frame control. Only + * valid when first_msdu is set. + * + * to_ds + * Set if the to DS bit is set in the frame control. Only + * valid when first_msdu is set. + * + * encrypted + * Protected bit from the frame control. Only valid when + * first_msdu is set. + * + * retry + * Retry bit from the frame control. Only valid when + * first_msdu is set. + * + * txbf_h_info + * The MPDU data will contain H information. Primarily used + * for debug. + * + * seq_num + * The sequence number from the 802.11 header. Only valid when + * first_msdu is set. + * + * encrypt_type + * Indicates type of decrypt cipher used (as defined in the + * peer table) + * 0: WEP40 + * 1: WEP104 + * 2: TKIP without MIC + * 3: WEP128 + * 4: TKIP (WPA) + * 5: WAPI + * 6: AES-CCM (WPA2) + * 7: No cipher + * Only valid when first_msdu_is set + * + * pn_31_0 + * Bits [31:0] of the PN number extracted from the IV field + * WEP: IV = {key_id_octet, pn2, pn1, pn0}. Only pn[23:0] is + * valid. + * TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0, + * WEPSeed[1], pn1}. Only pn[47:0] is valid. + * AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1, + * pn0}. Only pn[47:0] is valid. + * WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11, + * pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}. + * The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and + * pn[47:0] are valid. + * Only valid when first_msdu is set. 
+#define RX_MPDU_END_INFO0_RESERVED_0_MASK     0x00001fff
+#define RX_MPDU_END_INFO0_RESERVED_0_LSB      0
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB  16
+#define RX_MPDU_END_INFO0_OVERFLOW_ERR        (1 << 13)
+#define RX_MPDU_END_INFO0_LAST_MPDU           (1 << 14)
+#define RX_MPDU_END_INFO0_POST_DELIM_ERR      (1 << 15)
+#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR     (1 << 28)
+#define RX_MPDU_END_INFO0_TKIP_MIC_ERR        (1 << 29)
+#define RX_MPDU_END_INFO0_DECRYPT_ERR         (1 << 30)
+#define RX_MPDU_END_INFO0_FCS_ERR             (1 << 31)
+
+struct rx_mpdu_end {
+	__le32 info0;
+} __packed;
+
+/*
+ * reserved_0
+ *		Reserved
+ *
+ * overflow_err
+ *		The PCU receive FIFO does not have enough space to store the
+ *		full receive packet.  Enough space is reserved in the
+ *		receive FIFO so that the status can still be written.  This
+ *		MPDU and the remaining packets in the PPDU will be filtered
+ *		and no ACK response will be transmitted.
+ *
+ * last_mpdu
+ *		Indicates that this is the last MPDU of a PPDU.
+ *
+ * post_delim_err
+ *		Indicates that a delimiter FCS error occurred after this
+ *		MPDU before the next MPDU.  Only valid when last_mpdu is
+ *		set.
+ *
+ * post_delim_cnt
+ *		Count of the delimiters after this MPDU.  This requires the
+ *		last MPDU to be held until all the EOF descriptors have been
+ *		received.  This may be inefficient in the future when
+ *		ML-MIMO is used.  Only valid when last_mpdu is set.
+ *
+ * mpdu_length_err
+ *		See definition in RX attention descriptor
+ *
+ * tkip_mic_err
+ *		See definition in RX attention descriptor
+ *
+ * decrypt_err
+ *		See definition in RX attention descriptor
+ *
+ * fcs_err
+ *		See definition in RX attention descriptor
+ */
+
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK    0x00003fff
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB     0
+#define RX_MSDU_START_INFO0_IP_OFFSET_MASK      0x000fc000
+#define RX_MSDU_START_INFO0_IP_OFFSET_LSB       14
+#define RX_MSDU_START_INFO0_RING_MASK_MASK      0x00f00000
+#define RX_MSDU_START_INFO0_RING_MASK_LSB       20
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB  24
+
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK  0x000000ff
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB   0
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK 0x00000300
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB  8
+#define RX_MSDU_START_INFO1_SA_IDX_MASK       0x07ff0000
+#define RX_MSDU_START_INFO1_SA_IDX_LSB        16
+#define RX_MSDU_START_INFO1_IPV4_PROTO        (1 << 10)
+#define RX_MSDU_START_INFO1_IPV6_PROTO        (1 << 11)
+#define RX_MSDU_START_INFO1_TCP_PROTO         (1 << 12)
+#define RX_MSDU_START_INFO1_UDP_PROTO         (1 << 13)
+#define RX_MSDU_START_INFO1_IP_FRAG           (1 << 14)
+#define RX_MSDU_START_INFO1_TCP_ONLY_ACK      (1 << 15)
+
+enum rx_msdu_decap_format {
+	RX_MSDU_DECAP_RAW           = 0,
+	RX_MSDU_DECAP_NATIVE_WIFI   = 1,
+	RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+	RX_MSDU_DECAP_8023_SNAP_LLC = 3
+};
+
+struct rx_msdu_start {
+	__le32 info0; /* %RX_MSDU_START_INFO0_ */
+	__le32 flow_id_crc;
+	__le32 info1; /* %RX_MSDU_START_INFO1_ */
+} __packed;
+
+/*
+ * msdu_length
+ *		MSDU length in bytes after decapsulation.  This field is
+ *		still valid for MPDU frames without an A-MSDU; it still
+ *		represents the MSDU length after decapsulation.
+ *
+ * ip_offset
+ *		Indicates the IP offset in bytes from the start of the
+ *		packet after decapsulation.  Only valid if ipv4_proto or
+ *		ipv6_proto is set.
+ *
+ * ring_mask
+ *		Indicates the destination RX rings for this MSDU.
+ *
+ * tcp_udp_offset
+ *		Indicates the offset in bytes to the start of the TCP or UDP
+ *		header from the start of the IP header after decapsulation.
+ *		Only valid if tcp_proto or udp_proto is set.  The value 0
+ *		indicates that the offset is longer than 127 bytes.
+ *
+ * reserved_0c
+ *		Reserved: HW should fill with zero.  FW should ignore.
+ *
+ * flow_id_crc
+ *		The flow_id_crc runs CRC32 on the following information:
+ *		IPv4 option: dest_addr[31:0], src_addr[31:0], {24'b0,
+ *		protocol[7:0]}.
+ *		IPv6 option: dest_addr[127:0], src_addr[127:0], {24'b0,
+ *		next_header[7:0]}
+ *		UDP case: src_port[15:0], dest_port[15:0]
+ *		TCP case: src_port[15:0], dest_port[15:0],
+ *		{header_length[3:0], 6'b0, flags[5:0], window_size[15:0]},
+ *		{16'b0, urgent_ptr[15:0]}, all options except the 32-bit
+ *		timestamp.
+ *
+ * msdu_number
+ *		Indicates the MSDU number within an MPDU.  This value is
+ *		reset to zero at the start of each MPDU.  If the number of
+ *		MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_format
+ *		Indicates the format after decapsulation:
+ *		0: RAW: No decapsulation
+ *		1: Native WiFi
+ *		2: Ethernet 2 (DIX)
+ *		3: 802.3 (SNAP/LLC)
+ *
+ * ipv4_proto
+ *		Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ *		Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ *		Set if ipv4_proto or ipv6_proto is set and the IP protocol
+ *		indicates TCP.
+ *
+ * udp_proto
+ *		Set if ipv4_proto or ipv6_proto is set and the IP protocol
+ *		indicates UDP.
+ *
+ * ip_frag
+ *		Indicates that either the IP More Fragments bit is set or
+ *		the IP fragment number is non-zero.  If set, this is a
+ *		fragmented IP packet.
+ *
+ * tcp_only_ack
+ *		Set if only the TCP Ack bit is set in the TCP flags and if
+ *		the TCP payload is 0.
+ *
+ * sa_idx
+ *		The offset in the address table which matches the MAC source
+ *		address.
+ *
+ * reserved_2b
+ *		Reserved: HW should fill with zero.  FW should ignore.
+ */
+
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB  0
+#define RX_MSDU_END_INFO0_FIRST_MSDU                (1 << 14)
+#define RX_MSDU_END_INFO0_LAST_MSDU                 (1 << 15)
+#define RX_MSDU_END_INFO0_PRE_DELIM_ERR             (1 << 30)
+#define RX_MSDU_END_INFO0_RESERVED_3B               (1 << 31)
+
+struct rx_msdu_end {
+	__le16 ip_hdr_cksum;
+	__le16 tcp_hdr_cksum;
+	u8 key_id_octet;
+	u8 classification_filter;
+	u8 wapi_pn[10];
+	__le32 info0;
+} __packed;
+
+/*
+ * ip_hdr_cksum
+ *		This can include the IP header checksum or the pseudo-header
+ *		checksum used by the TCP/UDP checksum.
+ *
+ * tcp_hdr_cksum
+ *		The value of the computed TCP/UDP checksum.  A mode bit
+ *		selects whether this checksum is the full checksum or the
+ *		partial checksum which does not include the pseudo header.
+ *
+ * key_id_octet
+ *		The key ID octet from the IV.  Only valid when first_msdu is
+ *		set.
+ *
+ * classification_filter
+ *		Indicates the number of the classification filter rule.
+ *
+ * ext_wapi_pn_63_48
+ *		Extension PN (packet number) which is only used by WAPI.
+ *		This corresponds to WAPI PN bits [63:48] (pn6 and pn7).  The
+ *		WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start
+ *		descriptor.
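A similarly hedged sketch for rx_msdu_start, pulling out the per-MSDU length and decap format from the words defined above (the helper name is illustrative, not part of the patch):

	static inline void example_parse_msdu_start(const struct rx_msdu_start *rxd,
						    u16 *msdu_len,
						    enum rx_msdu_decap_format *fmt)
	{
		u32 info0 = __le32_to_cpu(rxd->info0);
		u32 info1 = __le32_to_cpu(rxd->info1);

		/* length is bits [13:0] of info0, decap format is
		 * bits [9:8] of info1 */
		*msdu_len = (info0 & RX_MSDU_START_INFO0_MSDU_LENGTH_MASK) >>
			    RX_MSDU_START_INFO0_MSDU_LENGTH_LSB;
		*fmt = (info1 & RX_MSDU_START_INFO1_DECAP_FORMAT_MASK) >>
		       RX_MSDU_START_INFO1_DECAP_FORMAT_LSB;
	}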
+ * + *ext_wapi_pn_95_64 + * Extension PN (packet number) which is only used by WAPI. + * This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and + * pn11). + * + *ext_wapi_pn_127_96 + * Extension PN (packet number) which is only used by WAPI. + * This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14, + * pn15). + * + *reported_mpdu_length + * MPDU length before decapsulation. Only valid when + * first_msdu is set. This field is taken directly from the + * length field of the A-MPDU delimiter or the preamble length + * field for non-A-MPDU frames. + * + *first_msdu + * Indicates the first MSDU of A-MSDU. If both first_msdu and + * last_msdu are set in the MSDU then this is a non-aggregated + * MSDU frame: normal MPDU. Interior MSDU in an A-MSDU shall + * have both first_mpdu and last_mpdu bits set to 0. + * + *last_msdu + * Indicates the last MSDU of the A-MSDU. MPDU end status is + * only valid when last_msdu is set. + * + *reserved_3a + * Reserved: HW should fill with zero. FW should ignore. + * + *pre_delim_err + * Indicates that the first delimiter had a FCS failure. Only + * valid when first_mpdu and first_msdu are set. + * + *reserved_3b + * Reserved: HW should fill with zero. FW should ignore. + */ + +#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0 +#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1 + +#define RX_PPDU_START_SIG_RATE_OFDM_48 0 +#define RX_PPDU_START_SIG_RATE_OFDM_24 1 +#define RX_PPDU_START_SIG_RATE_OFDM_12 2 +#define RX_PPDU_START_SIG_RATE_OFDM_6 3 +#define RX_PPDU_START_SIG_RATE_OFDM_54 4 +#define RX_PPDU_START_SIG_RATE_OFDM_36 5 +#define RX_PPDU_START_SIG_RATE_OFDM_18 6 +#define RX_PPDU_START_SIG_RATE_OFDM_9 7 + +#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0 +#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1 +#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2 +#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3 +#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4 +#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5 +#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6 + +#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04 +#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08 +#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09 +#define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C +#define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D + +#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0) + +#define RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f +#define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0 +#define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK 0x0001ffe0 +#define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB 5 +#define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK 0x00fc0000 +#define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18 +#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000 +#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24 +#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT (1 << 4) +#define RX_PPDU_START_INFO1_L_SIG_PARITY (1 << 17) + +#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff +#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0 + +#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff +#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0 +#define RX_PPDU_START_INFO3_TXBF_H_INFO (1 << 24) + +#define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff +#define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0 + +#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff +#define RX_PPDU_START_INFO5_SERVICE_LSB 0 + +struct rx_ppdu_start { + struct { + u8 pri20_mhz; + u8 ext20_mhz; + u8 ext40_mhz; + u8 ext80_mhz; + } rssi_chains[4]; + u8 rssi_comb; + __le16 rsvd0; + u8 info0; /* %RX_PPDU_START_INFO0_ */ + __le32 info1; /* %RX_PPDU_START_INFO1_ */ + __le32 info2; /* 
%RX_PPDU_START_INFO2_ */ + __le32 info3; /* %RX_PPDU_START_INFO3_ */ + __le32 info4; /* %RX_PPDU_START_INFO4_ */ + __le32 info5; /* %RX_PPDU_START_INFO5_ */ +} __packed; + +/* + * rssi_chain0_pri20 + * RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain0_sec20 + * RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain0_sec40 + * RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain0_sec80 + * RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain1_pri20 + * RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain1_sec20 + * RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain1_sec40 + * RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain1_sec80 + * RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain2_pri20 + * RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain2_sec20 + * RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain2_sec40 + * RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain2_sec80 + * RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain3_pri20 + * RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain3_sec20 + * RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain3_sec40 + * RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_chain3_sec80 + * RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth. + * Value of 0x80 indicates invalid. + * + * rssi_comb + * The combined RSSI of RX PPDU of all active chains and + * bandwidths. Value of 0x80 indicates invalid. + * + * reserved_4a + * Reserved: HW should fill with 0, FW should ignore. + * + * is_greenfield + * Do we really support this? + * + * reserved_4b + * Reserved: HW should fill with 0, FW should ignore. + * + * l_sig_rate + * If l_sig_rate_select is 0: + * 0x8: OFDM 48 Mbps + * 0x9: OFDM 24 Mbps + * 0xA: OFDM 12 Mbps + * 0xB: OFDM 6 Mbps + * 0xC: OFDM 54 Mbps + * 0xD: OFDM 36 Mbps + * 0xE: OFDM 18 Mbps + * 0xF: OFDM 9 Mbps + * If l_sig_rate_select is 1: + * 0x8: CCK 11 Mbps long preamble + * 0x9: CCK 5.5 Mbps long preamble + * 0xA: CCK 2 Mbps long preamble + * 0xB: CCK 1 Mbps long preamble + * 0xC: CCK 11 Mbps short preamble + * 0xD: CCK 5.5 Mbps short preamble + * 0xE: CCK 2 Mbps short preamble + * + * l_sig_rate_select + * Legacy signal rate select. If set then l_sig_rate indicates + * CCK rates. If clear then l_sig_rate indicates OFDM rates. + * + * l_sig_length + * Length of legacy frame in octets. 
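To make the L-SIG rate table above concrete, here is a sketch of decoding l_sig_rate into units of 100 kbps. It assumes, as the RX_PPDU_START_SIG_RATE_* defines earlier suggest, that the low three bits of the 4-bit field index the tables and that the hardware reports values in the 0x8..0xf range; -1 marks the one undefined CCK slot. This is illustrative only:

	static inline int example_lsig_rate_100kbps(u32 info1)
	{
		static const int ofdm[8] = { 480, 240, 120, 60, 540, 360, 180, 90 };
		static const int cck[8] = { 110, 55, 20, 10, 110, 55, 20, -1 };
		u32 rate = (info1 & RX_PPDU_START_INFO1_L_SIG_RATE_MASK) >>
			   RX_PPDU_START_INFO1_L_SIG_RATE_LSB;

		if (info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT)
			return cck[rate & 0x7];	/* long/short preamble CCK */

		return ofdm[rate & 0x7];	/* legacy OFDM */
	}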
+ * + * l_sig_parity + * Odd parity over l_sig_rate and l_sig_length + * + * l_sig_tail + * Tail bits for Viterbi decoder + * + * preamble_type + * Indicates the type of preamble ahead: + * 0x4: Legacy (OFDM/CCK) + * 0x8: HT + * 0x9: HT with TxBF + * 0xC: VHT + * 0xD: VHT with TxBF + * 0x80 - 0xFF: Reserved for special baseband data types such + * as radar and spectral scan. + * + * ht_sig_vht_sig_a_1 + * If preamble_type == 0x8 or 0x9 + * HT-SIG (first 24 bits) + * If preamble_type == 0xC or 0xD + * VHT-SIG A (first 24 bits) + * Else + * Reserved + * + * reserved_6 + * Reserved: HW should fill with 0, FW should ignore. + * + * ht_sig_vht_sig_a_2 + * If preamble_type == 0x8 or 0x9 + * HT-SIG (last 24 bits) + * If preamble_type == 0xC or 0xD + * VHT-SIG A (last 24 bits) + * Else + * Reserved + * + * txbf_h_info + * Indicates that the packet data carries H information which + * is used for TxBF debug. + * + * reserved_7 + * Reserved: HW should fill with 0, FW should ignore. + * + * vht_sig_b + * WiFi 1.0 and WiFi 2.0 will likely have this field to be all + * 0s since the BB does not plan on decoding VHT SIG-B. + * + * reserved_8 + * Reserved: HW should fill with 0, FW should ignore. + * + * service + * Service field from BB for OFDM, HT and VHT packets. CCK + * packets will have service field of 0. + * + * reserved_9 + * Reserved: HW should fill with 0, FW should ignore. +*/ + + +#define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0) +#define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1) +#define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2) + +#define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff +#define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0 +#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24) +#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25) + +#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15) + +struct rx_ppdu_end { + __le32 evm_p0; + __le32 evm_p1; + __le32 evm_p2; + __le32 evm_p3; + __le32 evm_p4; + __le32 evm_p5; + __le32 evm_p6; + __le32 evm_p7; + __le32 evm_p8; + __le32 evm_p9; + __le32 evm_p10; + __le32 evm_p11; + __le32 evm_p12; + __le32 evm_p13; + __le32 evm_p14; + __le32 evm_p15; + __le32 tsf_timestamp; + __le32 wb_timestamp; + u8 locationing_timestamp; + u8 phy_err_code; + __le16 flags; /* %RX_PPDU_END_FLAGS_ */ + __le32 info0; /* %RX_PPDU_END_INFO0_ */ + __le16 bb_length; + __le16 info1; /* %RX_PPDU_END_INFO1_ */ +} __packed; + +/* + * evm_p0 + * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p1 + * EVM for pilot 1. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p2 + * EVM for pilot 2. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p3 + * EVM for pilot 3. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p4 + * EVM for pilot 4. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p5 + * EVM for pilot 5. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p6 + * EVM for pilot 6. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p7 + * EVM for pilot 7. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p8 + * EVM for pilot 8. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p9 + * EVM for pilot 9. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p10 + * EVM for pilot 10. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p11 + * EVM for pilot 11. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p12 + * EVM for pilot 12. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p13 + * EVM for pilot 13. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p14 + * EVM for pilot 14. Contain EVM for streams: 0, 1, 2 and 3. + * + * evm_p15 + * EVM for pilot 15. 
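The rx_ppdu_end status just defined is only meaningful once the hardware flags it complete; a minimal sketch of the ppdu_done handshake that the field descriptions further down spell out (helper name illustrative):

	static inline bool example_ppdu_end_ready(struct rx_ppdu_end *rxd)
	{
		if (!(__le16_to_cpu(rxd->info1) & RX_PPDU_END_INFO1_PPDU_DONE))
			return false;

		/* HW sets the bit; the host must clear it after consuming
		 * the status so the next update is detectable */
		rxd->info1 &= ~__cpu_to_le16(RX_PPDU_END_INFO1_PPDU_DONE);
		return true;
	}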
Contain EVM for streams: 0, 1, 2 and 3. + * + * tsf_timestamp + * Receive TSF timestamp sampled on the rising edge of + * rx_clear. For PHY errors this may be the current TSF when + * phy_error is asserted if the rx_clear does not assert before + * the end of the PHY error. + * + * wb_timestamp + * WLAN/BT timestamp is a 1 usec resolution timestamp which + * does not get updated based on receive beacon like TSF. The + * same rules for capturing tsf_timestamp are used to capture + * the wb_timestamp. + * + * locationing_timestamp + * Timestamp used for locationing. This timestamp is used to + * indicate fractions of usec. For example if the MAC clock is + * running at 80 MHz, the timestamp will increment every 12.5 + * nsec. The value starts at 0 and increments to 79 and + * returns to 0 and repeats. This information is valid for + * every PPDU. This information can be used in conjunction + * with wb_timestamp to capture large delta times. + * + * phy_err_code + * See the 1.10.8.1.2 for the list of the PHY error codes. + * + * phy_err + * Indicates a PHY error was detected for this PPDU. + * + * rx_location + * Indicates that location information was requested. + * + * txbf_h_info + * Indicates that the packet data carries H information which + * is used for TxBF debug. + * + * reserved_18 + * Reserved: HW should fill with 0, FW should ignore. + * + * rx_antenna + * Receive antenna value + * + * tx_ht_vht_ack + * Indicates that a HT or VHT Ack/BA frame was transmitted in + * response to this receive packet. + * + * bb_captured_channel + * Indicates that the BB has captured a channel dump. FW can + * then read the channel dump memory. This may indicate that + * the channel was captured either based on PCU setting the + * capture_channel bit BB descriptor or FW setting the + * capture_channel mode bit. + * + * reserved_19 + * Reserved: HW should fill with 0, FW should ignore. + * + * bb_length + * Indicates the number of bytes of baseband information for + * PPDUs where the BB descriptor preamble type is 0x80 to 0xFF + * which indicates that this is not a normal PPDU but rather + * contains baseband debug information. + * + * reserved_20 + * Reserved: HW should fill with 0, FW should ignore. + * + * ppdu_done + * PPDU end status is only valid when ppdu_done bit is set. + * Every time HW sets this bit in memory FW/SW must clear this + * bit in memory. FW will initialize all the ppdu_done dword + * to 0. +*/ + +#define FW_RX_DESC_INFO0_DISCARD (1 << 0) +#define FW_RX_DESC_INFO0_FORWARD (1 << 1) +#define FW_RX_DESC_INFO0_INSPECT (1 << 5) +#define FW_RX_DESC_INFO0_EXT_MASK 0xC0 +#define FW_RX_DESC_INFO0_EXT_LSB 6 + +struct fw_rx_desc_base { + u8 info0; +} __packed; + +#endif /* _RX_DESC_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h new file mode 100644 index 000000000000..be7ba1e78afe --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -0,0 +1,449 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __TARGADDRS_H__ +#define __TARGADDRS_H__ + +/* + * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the + * host_interest structure. It must match the address of the _host_interest + * symbol (see linker script). + * + * Host Interest is shared between Host and Target in order to coordinate + * between the two, and is intended to remain constant (with additions only + * at the end) across software releases. + * + * All addresses are available here so that it's possible to + * write a single binary that works with all Target Types. + * May be used in assembler code as well as C. + */ +#define QCA988X_HOST_INTEREST_ADDRESS 0x00400800 +#define HOST_INTEREST_MAX_SIZE 0x200 + +/* + * These are items that the Host may need to access via BMI or via the + * Diagnostic Window. The position of items in this structure must remain + * constant across firmware revisions! Types for each item must be fixed + * size across target and host platforms. More items may be added at the end. + */ +struct host_interest { + /* + * Pointer to application-defined area, if any. + * Set by Target application during startup. + */ + u32 hi_app_host_interest; /* 0x00 */ + + /* Pointer to register dump area, valid after Target crash. */ + u32 hi_failure_state; /* 0x04 */ + + /* Pointer to debug logging header */ + u32 hi_dbglog_hdr; /* 0x08 */ + + u32 hi_unused0c; /* 0x0c */ + + /* + * General-purpose flag bits, similar to SOC_OPTION_* flags. + * Can be used by application rather than by OS. + */ + u32 hi_option_flag; /* 0x10 */ + + /* + * Boolean that determines whether or not to + * display messages on the serial port. + */ + u32 hi_serial_enable; /* 0x14 */ + + /* Start address of DataSet index, if any */ + u32 hi_dset_list_head; /* 0x18 */ + + /* Override Target application start address */ + u32 hi_app_start; /* 0x1c */ + + /* Clock and voltage tuning */ + u32 hi_skip_clock_init; /* 0x20 */ + u32 hi_core_clock_setting; /* 0x24 */ + u32 hi_cpu_clock_setting; /* 0x28 */ + u32 hi_system_sleep_setting; /* 0x2c */ + u32 hi_xtal_control_setting; /* 0x30 */ + u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */ + u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */ + u32 hi_ref_voltage_trim_setting; /* 0x3c */ + u32 hi_clock_info; /* 0x40 */ + + /* Host uses BE CPU or not */ + u32 hi_be; /* 0x44 */ + + u32 hi_stack; /* normal stack */ /* 0x48 */ + u32 hi_err_stack; /* error stack */ /* 0x4c */ + u32 hi_desired_cpu_speed_hz; /* 0x50 */ + + /* Pointer to Board Data */ + u32 hi_board_data; /* 0x54 */ + + /* + * Indication of Board Data state: + * 0: board data is not yet initialized. 
+ * 1: board data is initialized; unknown size + * >1: number of bytes of initialized board data + */ + u32 hi_board_data_initialized; /* 0x58 */ + + u32 hi_dset_ram_index_table; /* 0x5c */ + + u32 hi_desired_baud_rate; /* 0x60 */ + u32 hi_dbglog_config; /* 0x64 */ + u32 hi_end_ram_reserve_sz; /* 0x68 */ + u32 hi_mbox_io_block_sz; /* 0x6c */ + + u32 hi_num_bpatch_streams; /* 0x70 -- unused */ + u32 hi_mbox_isr_yield_limit; /* 0x74 */ + + u32 hi_refclk_hz; /* 0x78 */ + u32 hi_ext_clk_detected; /* 0x7c */ + u32 hi_dbg_uart_txpin; /* 0x80 */ + u32 hi_dbg_uart_rxpin; /* 0x84 */ + u32 hi_hci_uart_baud; /* 0x88 */ + u32 hi_hci_uart_pin_assignments; /* 0x8C */ + + u32 hi_hci_uart_baud_scale_val; /* 0x90 */ + u32 hi_hci_uart_baud_step_val; /* 0x94 */ + + u32 hi_allocram_start; /* 0x98 */ + u32 hi_allocram_sz; /* 0x9c */ + u32 hi_hci_bridge_flags; /* 0xa0 */ + u32 hi_hci_uart_support_pins; /* 0xa4 */ + + u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */ + + /* + * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high + * [31:16]: wakeup timeout in ms + */ + /* Pointer to extended board Data */ + u32 hi_board_ext_data; /* 0xac */ + u32 hi_board_ext_data_config; /* 0xb0 */ + /* + * Bit [0] : valid + * Bit[31:16: size + */ + /* + * hi_reset_flag is used to do some stuff when target reset. + * such as restore app_start after warm reset or + * preserve host Interest area, or preserve ROM data, literals etc. + */ + u32 hi_reset_flag; /* 0xb4 */ + /* indicate hi_reset_flag is valid */ + u32 hi_reset_flag_valid; /* 0xb8 */ + u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */ + /* 0xbc - [31:0]: idle timeout in ms */ + /* ACS flags */ + u32 hi_acs_flags; /* 0xc0 */ + u32 hi_console_flags; /* 0xc4 */ + u32 hi_nvram_state; /* 0xc8 */ + u32 hi_option_flag2; /* 0xcc */ + + /* If non-zero, override values sent to Host in WMI_READY event. */ + u32 hi_sw_version_override; /* 0xd0 */ + u32 hi_abi_version_override; /* 0xd4 */ + + /* + * Percentage of high priority RX traffic to total expected RX traffic + * applicable only to ar6004 + */ + u32 hi_hp_rx_traffic_ratio; /* 0xd8 */ + + /* test applications flags */ + u32 hi_test_apps_related; /* 0xdc */ + /* location of test script */ + u32 hi_ota_testscript; /* 0xe0 */ + /* location of CAL data */ + u32 hi_cal_data; /* 0xe4 */ + + /* Number of packet log buffers */ + u32 hi_pktlog_num_buffers; /* 0xe8 */ + + /* wow extension configuration */ + u32 hi_wow_ext_config; /* 0xec */ + u32 hi_pwr_save_flags; /* 0xf0 */ + + /* Spatial Multiplexing Power Save (SMPS) options */ + u32 hi_smps_options; /* 0xf4 */ + + /* Interconnect-specific state */ + u32 hi_interconnect_state; /* 0xf8 */ + + /* Coex configuration flags */ + u32 hi_coex_config; /* 0xfc */ + + /* Early allocation support */ + u32 hi_early_alloc; /* 0x100 */ + /* FW swap field */ + /* + * Bits of this 32bit word will be used to pass specific swap + * instruction to FW + */ + /* + * Bit 0 -- AP Nart descriptor no swap. When this bit is set + * FW will not swap TX descriptor. Meaning packets are formed + * on the target processor. 
+ */
+	/* Bit 1 - unused */
+	u32 hi_fw_swap;					/* 0x104 */
+} __packed;
+
+#define HI_ITEM(item)  offsetof(struct host_interest, item)
+
+/* Bits defined in hi_option_flag */
+
+/* Enable timer workaround */
+#define HI_OPTION_TIMER_WAR         0x01
+/* Limit BMI command credits */
+#define HI_OPTION_BMI_CRED_LIMIT    0x02
+/* Relay Dot11 hdr to/from host */
+#define HI_OPTION_RELAY_DOT11_HDR   0x04
+/* MAC addr method: 0 - locally administered, 1 - globally unique addrs */
+#define HI_OPTION_MAC_ADDR_METHOD   0x08
+/* Firmware Bridging */
+#define HI_OPTION_FW_BRIDGE         0x10
+/* Enable CPU profiling */
+#define HI_OPTION_ENABLE_PROFILE    0x20
+/* Disable debug logging */
+#define HI_OPTION_DISABLE_DBGLOG    0x40
+/* Skip Era Tracking */
+#define HI_OPTION_SKIP_ERA_TRACKING 0x80
+/* Disable PAPRD (debug) */
+#define HI_OPTION_PAPRD_DISABLE     0x100
+#define HI_OPTION_NUM_DEV_LSB       0x200
+#define HI_OPTION_NUM_DEV_MSB       0x800
+#define HI_OPTION_DEV_MODE_LSB      0x1000
+#define HI_OPTION_DEV_MODE_MSB      0x8000000
+/* Disable LowFreq Timer Stabilization */
+#define HI_OPTION_NO_LFT_STBL       0x10000000
+/* Skip regulatory scan */
+#define HI_OPTION_SKIP_REG_SCAN     0x20000000
+/*
+ * Do regulatory scan during init before
+ * sending WMI ready event to host
+ */
+#define HI_OPTION_INIT_REG_SCAN     0x40000000
+
+/* REV6: Do not adjust memory map */
+#define HI_OPTION_SKIP_MEMMAP       0x80000000
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+/* 2 bits of hi_option_flag are used to represent 3 modes */
+#define HI_OPTION_FW_MODE_IBSS    0x0 /* IBSS Mode */
+#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
+#define HI_OPTION_FW_MODE_AP      0x2 /* AP Mode */
+#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
+
+/* 2 bits of hi_option_flag are used to represent 4 submodes */
+#define HI_OPTION_FW_SUBMODE_NONE      0x0 /* Normal mode */
+#define HI_OPTION_FW_SUBMODE_P2PDEV    0x1 /* p2p device mode */
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
+#define HI_OPTION_FW_SUBMODE_P2PGO     0x3 /* p2p go mode */
+
+/* Num dev Mask */
+#define HI_OPTION_NUM_DEV_MASK  0x7
+#define HI_OPTION_NUM_DEV_SHIFT 0x9
+
+/* firmware bridging */
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/*
+ * Fw Mode/SubMode Mask
+ * |---------------------------------------------------------------------------|
+ * |   SUB   |   SUB   |   SUB   |   SUB   |         |         |        |       |
+ * | MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1]|MODE[0]|
+ * |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)  |  (2)  |
+ * |---------------------------------------------------------------------------|
+ */
+#define HI_OPTION_FW_MODE_BITS         0x2
+#define HI_OPTION_FW_MODE_MASK         0x3
+#define HI_OPTION_FW_MODE_SHIFT        0xC
+#define HI_OPTION_ALL_FW_MODE_MASK     0xFF
+
+#define HI_OPTION_FW_SUBMODE_BITS      0x2
+#define HI_OPTION_FW_SUBMODE_MASK      0x3
+#define HI_OPTION_FW_SUBMODE_SHIFT     0x14
+#define HI_OPTION_ALL_FW_SUBMODE_MASK  0xFF00
+#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
+
+/* hi_option_flag2 options */
+#define HI_OPTION_OFFLOAD_AMSDU           0x01
+#define HI_OPTION_DFS_SUPPORT             0x02 /* Enable DFS support */
+#define HI_OPTION_ENABLE_RFKILL           0x04 /* RFKill Enable Feature */
+#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
+#define HI_OPTION_EARLY_CFG_DONE          0x10 /* Early configuration is complete */
+
+#define HI_OPTION_RF_KILL_SHIFT 0x2
+#define HI_OPTION_RF_KILL_MASK  0x1
+
+/* hi_reset_flag */
+/* preserve App Start address */
+#define HI_RESET_FLAG_PRESERVE_APP_START     0x01
+/* preserve host interest */
+#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02
+/* preserve ROM data */
+#define HI_RESET_FLAG_PRESERVE_ROMDATA       0x04
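Returning briefly to the mode/submode map above: a sketch of how the per-device 2-bit mode and submode fields could be packed into hi_option_flag. This is illustrative only (the firmware interface is authoritative); HI_ITEM(hi_option_flag) would give the offset a host writes through the diagnostic window:

	static inline u32 example_pack_fw_mode(u32 flags, int dev,
					       u32 mode, u32 submode)
	{
		/* modes start at bit 12 (HI_OPTION_FW_MODE_SHIFT), submodes
		 * at bit 20 (HI_OPTION_FW_SUBMODE_SHIFT), 2 bits per device */
		flags |= (mode & HI_OPTION_FW_MODE_MASK) <<
			 (HI_OPTION_FW_MODE_SHIFT + dev * HI_OPTION_FW_MODE_BITS);
		flags |= (submode & HI_OPTION_FW_SUBMODE_MASK) <<
			 (HI_OPTION_FW_SUBMODE_SHIFT + dev * HI_OPTION_FW_SUBMODE_BITS);

		return flags;
	}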
+#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08 +#define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10 +#define HI_RESET_FLAG_WARM_RESET 0x20 + +/* define hi_fw_swap bits */ +#define HI_DESC_IN_FW_BIT 0x01 + +/* indicate the reset flag is valid */ +#define HI_RESET_FLAG_IS_VALID 0x12345678 + +/* ACS is enabled */ +#define HI_ACS_FLAGS_ENABLED (1 << 0) +/* Use physical WWAN device */ +#define HI_ACS_FLAGS_USE_WWAN (1 << 1) +/* Use test VAP */ +#define HI_ACS_FLAGS_TEST_VAP (1 << 2) + +/* + * CONSOLE FLAGS + * + * Bit Range Meaning + * --------- -------------------------------- + * 2..0 UART ID (0 = Default) + * 3 Baud Select (0 = 9600, 1 = 115200) + * 30..4 Reserved + * 31 Enable Console + * + */ + +#define HI_CONSOLE_FLAGS_ENABLE (1 << 31) +#define HI_CONSOLE_FLAGS_UART_MASK (0x7) +#define HI_CONSOLE_FLAGS_UART_SHIFT 0 +#define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3) + +/* SM power save options */ +#define HI_SMPS_ALLOW_MASK (0x00000001) +#define HI_SMPS_MODE_MASK (0x00000002) +#define HI_SMPS_MODE_STATIC (0x00000000) +#define HI_SMPS_MODE_DYNAMIC (0x00000002) +#define HI_SMPS_DISABLE_AUTO_MODE (0x00000004) +#define HI_SMPS_DATA_THRESH_MASK (0x000007f8) +#define HI_SMPS_DATA_THRESH_SHIFT (3) +#define HI_SMPS_RSSI_THRESH_MASK (0x0007f800) +#define HI_SMPS_RSSI_THRESH_SHIFT (11) +#define HI_SMPS_LOWPWR_CM_MASK (0x00380000) +#define HI_SMPS_LOWPWR_CM_SHIFT (15) +#define HI_SMPS_HIPWR_CM_MASK (0x03c00000) +#define HI_SMPS_HIPWR_CM_SHIFT (19) + +/* + * WOW Extension configuration + * + * Bit Range Meaning + * --------- -------------------------------- + * 8..0 Size of each WOW pattern (max 511) + * 15..9 Number of patterns per list (max 127) + * 17..16 Number of lists (max 4) + * 30..18 Reserved + * 31 Enabled + * + * set values (except enable) to zeros for default settings + */ + +#define HI_WOW_EXT_ENABLED_MASK (1 << 31) +#define HI_WOW_EXT_NUM_LIST_SHIFT 16 +#define HI_WOW_EXT_NUM_LIST_MASK (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT) +#define HI_WOW_EXT_NUM_PATTERNS_SHIFT 9 +#define HI_WOW_EXT_NUM_PATTERNS_MASK (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT) +#define HI_WOW_EXT_PATTERN_SIZE_SHIFT 0 +#define HI_WOW_EXT_PATTERN_SIZE_MASK (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT) + +#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \ + ((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \ + HI_WOW_EXT_NUM_LIST_MASK) | \ + (((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \ + HI_WOW_EXT_NUM_PATTERNS_MASK) | \ + (((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \ + HI_WOW_EXT_PATTERN_SIZE_MASK)) + +#define HI_WOW_EXT_GET_NUM_LISTS(config) \ + (((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT) +#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \ + (((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \ + HI_WOW_EXT_NUM_PATTERNS_SHIFT) +#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \ + (((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \ + HI_WOW_EXT_PATTERN_SIZE_SHIFT) + +/* + * Early allocation configuration + * Support RAM bank configuration before BMI done and this eases the memory + * allocation at very early stage + * Bit Range Meaning + * --------- ---------------------------------- + * [0:3] number of bank assigned to be IRAM + * [4:15] reserved + * [16:31] magic number + * + * Note: + * 1. target firmware would check magic number and if it's a match, firmware + * would consider the bits[0:15] are valid and base on that to calculate + * the end of DRAM. Early allocation would be located at that area and + * may be reclaimed when necesary + * 2. 
if no magic number is found, early allocation would happen at "_end" + * symbol of ROM which is located before the app-data and might NOT be + * re-claimable. If this is adopted, link script should keep this in + * mind to avoid data corruption. + */ +#define HI_EARLY_ALLOC_MAGIC 0x6d8a +#define HI_EARLY_ALLOC_MAGIC_MASK 0xffff0000 +#define HI_EARLY_ALLOC_MAGIC_SHIFT 16 +#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f +#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT 0 + +#define HI_EARLY_ALLOC_VALID() \ + ((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \ + HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC)) +#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \ + (((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \ + >> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) + +/*power save flag bit definitions*/ +#define HI_PWR_SAVE_LPL_ENABLED 0x1 +/*b1-b3 reserved*/ +/*b4-b5 : dev0 LPL type : 0 - none + 1- Reduce Pwr Search + 2- Reduce Pwr Listen*/ +/*b6-b7 : dev1 LPL type and so on for Max 8 devices*/ +#define HI_PWR_SAVE_LPL_DEV0_LSB 4 +#define HI_PWR_SAVE_LPL_DEV_MASK 0x3 +/*power save related utility macros*/ +#define HI_LPL_ENABLED() \ + ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED)) +#define HI_DEV_LPL_TYPE_GET(_devix) \ + (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \ + (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2))) + +#define HOST_INTEREST_SMPS_IS_ALLOWED() \ + ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK)) + +/* Reserve 1024 bytes for extended board data */ +#define QCA988X_BOARD_DATA_SZ 7168 +#define QCA988X_BOARD_EXT_DATA_SZ 0 + +#endif /* __TARGADDRS_H__ */ diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c new file mode 100644 index 000000000000..4a31e2c6fbd4 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/trace.c @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> + +#define CREATE_TRACE_POINTS +#include "trace.h" diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h new file mode 100644 index 000000000000..85e806bf7257 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) + +#include <linux/tracepoint.h> + +#define _TRACE_H_ + +/* create empty functions when tracing is disabled */ +#if !defined(CONFIG_ATH10K_TRACING) +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(...) +#undef DEFINE_EVENT +#define DEFINE_EVENT(evt_class, name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#endif /* !CONFIG_ATH10K_TRACING || __CHECKER__ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ath10k + +#define ATH10K_MSG_MAX 200 + +DECLARE_EVENT_CLASS(ath10k_log_event, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf), + TP_STRUCT__entry( + __dynamic_array(char, msg, ATH10K_MSG_MAX) + ), + TP_fast_assign( + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + ATH10K_MSG_MAX, + vaf->fmt, + *vaf->va) >= ATH10K_MSG_MAX); + ), + TP_printk("%s", __get_str(msg)) +); + +DEFINE_EVENT(ath10k_log_event, ath10k_log_err, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(ath10k_log_event, ath10k_log_warn, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(ath10k_log_event, ath10k_log_info, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +TRACE_EVENT(ath10k_log_dbg, + TP_PROTO(unsigned int level, struct va_format *vaf), + TP_ARGS(level, vaf), + TP_STRUCT__entry( + __field(unsigned int, level) + __dynamic_array(char, msg, ATH10K_MSG_MAX) + ), + TP_fast_assign( + __entry->level = level; + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + ATH10K_MSG_MAX, + vaf->fmt, + *vaf->va) >= ATH10K_MSG_MAX); + ), + TP_printk("%s", __get_str(msg)) +); + +TRACE_EVENT(ath10k_log_dbg_dump, + TP_PROTO(const char *msg, const char *prefix, + const void *buf, size_t buf_len), + + TP_ARGS(msg, prefix, buf, buf_len), + + TP_STRUCT__entry( + __string(msg, msg) + __string(prefix, prefix) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __assign_str(msg, msg); + __assign_str(prefix, prefix); + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "%s/%s\n", __get_str(prefix), __get_str(msg) + ) +); + +TRACE_EVENT(ath10k_wmi_cmd, + TP_PROTO(int id, void *buf, size_t buf_len), + + TP_ARGS(id, buf, buf_len), + + TP_STRUCT__entry( + __field(unsigned int, id) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->id = id; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "id %d len %zu", + __entry->id, + __entry->buf_len + ) +); + +TRACE_EVENT(ath10k_wmi_event, + TP_PROTO(int id, void *buf, size_t buf_len), + + TP_ARGS(id, buf, buf_len), + + TP_STRUCT__entry( + __field(unsigned int, id) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->id = id; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "id %d len %zu", + __entry->id, + __entry->buf_len + ) +); + +#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ + +/* we don't want to use include/trace/events */ +#undef TRACE_INCLUDE_PATH 
+#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c new file mode 100644 index 000000000000..68b6faefd1d8 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -0,0 +1,417 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "core.h" +#include "txrx.h" +#include "htt.h" +#include "mac.h" +#include "debug.h" + +static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb) +{ + if (!ATH10K_SKB_CB(skb)->htt.is_offchan) + return; + + /* If the original wait_for_completion() timed out before + * {data,mgmt}_tx_completed() was called then we could complete + * offchan_tx_completed for a different skb. Prevent this by using + * offchan_tx_skb. */ + spin_lock_bh(&ar->data_lock); + if (ar->offchan_tx_skb != skb) { + ath10k_warn("completed old offchannel frame\n"); + goto out; + } + + complete(&ar->offchan_tx_completed); + ar->offchan_tx_skb = NULL; /* just for sanity */ + + ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb); +out: + spin_unlock_bh(&ar->data_lock); +} + +void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc) +{ + struct device *dev = htt->ar->dev; + struct ieee80211_tx_info *info; + struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag; + struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu; + int ret; + + if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0) + return; + + ATH10K_SKB_CB(txdesc)->htt.refcount--; + + if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) + return; + + if (txfrag) { + ret = ath10k_skb_unmap(dev, txfrag); + if (ret) + ath10k_warn("txfrag unmap failed (%d)\n", ret); + + dev_kfree_skb_any(txfrag); + } + + ret = ath10k_skb_unmap(dev, msdu); + if (ret) + ath10k_warn("data skb unmap failed (%d)\n", ret); + + ath10k_report_offchan_tx(htt->ar, msdu); + + info = IEEE80211_SKB_CB(msdu); + memset(&info->status, 0, sizeof(info->status)); + + if (ATH10K_SKB_CB(txdesc)->htt.discard) { + ieee80211_free_txskb(htt->ar->hw, msdu); + goto exit; + } + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + info->flags |= IEEE80211_TX_STAT_ACK; + + if (ATH10K_SKB_CB(txdesc)->htt.no_ack) + info->flags &= ~IEEE80211_TX_STAT_ACK; + + ieee80211_tx_status(htt->ar->hw, msdu); + /* we do not own the msdu anymore */ + +exit: + spin_lock_bh(&htt->tx_lock); + htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL; + ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id); + __ath10k_htt_tx_dec_pending(htt); + if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx)) + wake_up(&htt->empty_tx_wq); + 
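+	/* the wake_up() above fires only once the last outstanding
+	 * msdu_id has been returned, i.e. when all tx has drained and
+	 * anyone sleeping on empty_tx_wq (e.g. a flush) may proceed */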
spin_unlock_bh(&htt->tx_lock); + + dev_kfree_skb_any(txdesc); +} + +void ath10k_txrx_tx_completed(struct ath10k_htt *htt, + const struct htt_tx_done *tx_done) +{ + struct sk_buff *txdesc; + + ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", + tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); + + if (tx_done->msdu_id >= htt->max_num_pending_tx) { + ath10k_warn("warning: msdu_id %d too big, ignoring\n", + tx_done->msdu_id); + return; + } + + txdesc = htt->pending_tx[tx_done->msdu_id]; + + ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard; + ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack; + + ath10k_txrx_tx_unref(htt, txdesc); +} + +static const u8 rx_legacy_rate_idx[] = { + 3, /* 0x00 - 11Mbps */ + 2, /* 0x01 - 5.5Mbps */ + 1, /* 0x02 - 2Mbps */ + 0, /* 0x03 - 1Mbps */ + 3, /* 0x04 - 11Mbps */ + 2, /* 0x05 - 5.5Mbps */ + 1, /* 0x06 - 2Mbps */ + 0, /* 0x07 - 1Mbps */ + 10, /* 0x08 - 48Mbps */ + 8, /* 0x09 - 24Mbps */ + 6, /* 0x0A - 12Mbps */ + 4, /* 0x0B - 6Mbps */ + 11, /* 0x0C - 54Mbps */ + 9, /* 0x0D - 36Mbps */ + 7, /* 0x0E - 18Mbps */ + 5, /* 0x0F - 9Mbps */ +}; + +static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info, + enum ieee80211_band band, + struct ieee80211_rx_status *status) +{ + u8 cck, rate, rate_idx, bw, sgi, mcs, nss; + u8 info0 = info->rate.info0; + u32 info1 = info->rate.info1; + u32 info2 = info->rate.info2; + u8 preamble = 0; + + /* Check if valid fields */ + if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID)) + return; + + preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE); + + switch (preamble) { + case HTT_RX_LEGACY: + cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK; + rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE); + rate_idx = 0; + + if (rate < 0x08 || rate > 0x0F) + break; + + switch (band) { + case IEEE80211_BAND_2GHZ: + if (cck) + rate &= ~BIT(3); + rate_idx = rx_legacy_rate_idx[rate]; + break; + case IEEE80211_BAND_5GHZ: + rate_idx = rx_legacy_rate_idx[rate]; + /* We are using same rate table registering + HW - ath10k_rates[]. 
In case of 5GHz skip + CCK rates, so -4 here */ + rate_idx -= 4; + break; + default: + break; + } + + status->rate_idx = rate_idx; + break; + case HTT_RX_HT: + case HTT_RX_HT_WITH_TXBF: + /* HT-SIG - Table 20-11 in info1 and info2 */ + mcs = info1 & 0x1F; + nss = mcs >> 3; + bw = (info1 >> 7) & 1; + sgi = (info2 >> 7) & 1; + + status->rate_idx = mcs; + status->flag |= RX_FLAG_HT; + if (sgi) + status->flag |= RX_FLAG_SHORT_GI; + if (bw) + status->flag |= RX_FLAG_40MHZ; + break; + case HTT_RX_VHT: + case HTT_RX_VHT_WITH_TXBF: + /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2 + TODO check this */ + mcs = (info2 >> 4) & 0x0F; + nss = (info1 >> 10) & 0x07; + bw = info1 & 3; + sgi = info2 & 1; + + status->rate_idx = mcs; + status->vht_nss = nss; + + if (sgi) + status->flag |= RX_FLAG_SHORT_GI; + + switch (bw) { + /* 20MHZ */ + case 0: + break; + /* 40MHZ */ + case 1: + status->flag |= RX_FLAG_40MHZ; + break; + /* 80MHZ */ + case 2: + status->flag |= RX_FLAG_80MHZ; + } + + status->flag |= RX_FLAG_VHT; + break; + default: + break; + } +} + +void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info) +{ + struct ieee80211_rx_status *status; + struct ieee80211_channel *ch; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data; + + status = IEEE80211_SKB_RXCB(info->skb); + memset(status, 0, sizeof(*status)); + + if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) { + status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED; + hdr->frame_control = __cpu_to_le16( + __le16_to_cpu(hdr->frame_control) & + ~IEEE80211_FCTL_PROTECTED); + } + + if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) + status->flag |= RX_FLAG_MMIC_ERROR; + + if (info->fcs_err) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + + status->signal = info->signal; + + spin_lock_bh(&ar->data_lock); + ch = ar->scan_channel; + if (!ch) + ch = ar->rx_channel; + spin_unlock_bh(&ar->data_lock); + + if (!ch) { + ath10k_warn("no channel configured; ignoring frame!\n"); + dev_kfree_skb_any(info->skb); + return; + } + + process_rx_rates(ar, info, ch->band, status); + status->band = ch->band; + status->freq = ch->center_freq; + + ath10k_dbg(ATH10K_DBG_DATA, + "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n", + info->skb, + info->skb->len, + status->flag == 0 ? "legacy" : "", + status->flag & RX_FLAG_HT ? "ht" : "", + status->flag & RX_FLAG_VHT ? "vht" : "", + status->flag & RX_FLAG_40MHZ ? "40" : "", + status->flag & RX_FLAG_80MHZ ? "80" : "", + status->flag & RX_FLAG_SHORT_GI ? 
"sgi " : "", + status->rate_idx, + status->vht_nss, + status->freq, + status->band); + + ieee80211_rx(ar->hw, info->skb); +} + +struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, + const u8 *addr) +{ + struct ath10k_peer *peer; + + lockdep_assert_held(&ar->data_lock); + + list_for_each_entry(peer, &ar->peers, list) { + if (peer->vdev_id != vdev_id) + continue; + if (memcmp(peer->addr, addr, ETH_ALEN)) + continue; + + return peer; + } + + return NULL; +} + +static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, + int peer_id) +{ + struct ath10k_peer *peer; + + lockdep_assert_held(&ar->data_lock); + + list_for_each_entry(peer, &ar->peers, list) + if (test_bit(peer_id, peer->peer_ids)) + return peer; + + return NULL; +} + +static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, + const u8 *addr, bool expect_mapped) +{ + int ret; + + ret = wait_event_timeout(ar->peer_mapping_wq, ({ + bool mapped; + + spin_lock_bh(&ar->data_lock); + mapped = !!ath10k_peer_find(ar, vdev_id, addr); + spin_unlock_bh(&ar->data_lock); + + mapped == expect_mapped; + }), 3*HZ); + + if (ret <= 0) + return -ETIMEDOUT; + + return 0; +} + +int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr) +{ + return ath10k_wait_for_peer_common(ar, vdev_id, addr, true); +} + +int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr) +{ + return ath10k_wait_for_peer_common(ar, vdev_id, addr, false); +} + +void ath10k_peer_map_event(struct ath10k_htt *htt, + struct htt_peer_map_event *ev) +{ + struct ath10k *ar = htt->ar; + struct ath10k_peer *peer; + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr); + if (!peer) { + peer = kzalloc(sizeof(*peer), GFP_ATOMIC); + if (!peer) + goto exit; + + peer->vdev_id = ev->vdev_id; + memcpy(peer->addr, ev->addr, ETH_ALEN); + list_add(&peer->list, &ar->peers); + wake_up(&ar->peer_mapping_wq); + } + + ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", + ev->vdev_id, ev->addr, ev->peer_id); + + set_bit(ev->peer_id, peer->peer_ids); +exit: + spin_unlock_bh(&ar->data_lock); +} + +void ath10k_peer_unmap_event(struct ath10k_htt *htt, + struct htt_peer_unmap_event *ev) +{ + struct ath10k *ar = htt->ar; + struct ath10k_peer *peer; + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find_by_id(ar, ev->peer_id); + if (!peer) { + ath10k_warn("unknown peer id %d\n", ev->peer_id); + goto exit; + } + + ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", + peer->vdev_id, peer->addr, ev->peer_id); + + clear_bit(ev->peer_id, peer->peer_ids); + + if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) { + list_del(&peer->list); + kfree(peer); + wake_up(&ar->peer_mapping_wq); + } + +exit: + spin_unlock_bh(&ar->data_lock); +} diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h new file mode 100644 index 000000000000..e78632a76df7 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/txrx.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef _TXRX_H_ +#define _TXRX_H_ + +#include "htt.h" + +void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc); +void ath10k_txrx_tx_completed(struct ath10k_htt *htt, + const struct htt_tx_done *tx_done); +void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info); + +struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, + const u8 *addr); +int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, + const u8 *addr); +int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, + const u8 *addr); + +void ath10k_peer_map_event(struct ath10k_htt *htt, + struct htt_peer_map_event *ev); +void ath10k_peer_unmap_event(struct ath10k_htt *htt, + struct htt_peer_unmap_event *ev); + +#endif diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c new file mode 100644 index 000000000000..7d4b7987422d --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -0,0 +1,2081 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/skbuff.h> + +#include "core.h" +#include "htc.h" +#include "debug.h" +#include "wmi.h" +#include "mac.h" + +void ath10k_wmi_flush_tx(struct ath10k *ar) +{ + int ret; + + ret = wait_event_timeout(ar->wmi.wq, + atomic_read(&ar->wmi.pending_tx_count) == 0, + 5*HZ); + if (atomic_read(&ar->wmi.pending_tx_count) == 0) + return; + + if (ret == 0) + ret = -ETIMEDOUT; + + if (ret < 0) + ath10k_warn("wmi flush failed (%d)\n", ret); +} + +int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) +{ + int ret; + ret = wait_for_completion_timeout(&ar->wmi.service_ready, + WMI_SERVICE_READY_TIMEOUT_HZ); + return ret; +} + +int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar) +{ + int ret; + ret = wait_for_completion_timeout(&ar->wmi.unified_ready, + WMI_UNIFIED_READY_TIMEOUT_HZ); + return ret; +} + +static struct sk_buff *ath10k_wmi_alloc_skb(u32 len) +{ + struct sk_buff *skb; + u32 round_len = roundup(len, 4); + + skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len); + if (!skb) + return NULL; + + skb_reserve(skb, WMI_SKB_HEADROOM); + if (!IS_ALIGNED((unsigned long)skb->data, 4)) + ath10k_warn("Unaligned WMI skb\n"); + + skb_put(skb, round_len); + memset(skb->data, 0, round_len); + + return skb; +} + +static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + dev_kfree_skb(skb); + + if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0) + wake_up(&ar->wmi.wq); +} + +/* WMI command API */ +static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, + enum wmi_cmd_id cmd_id) +{ + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); + struct wmi_cmd_hdr *cmd_hdr; + int status; + u32 cmd = 0; + + if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL) + return -ENOMEM; + + cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID); + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + cmd_hdr->cmd_id = __cpu_to_le32(cmd); + + if (atomic_add_return(1, &ar->wmi.pending_tx_count) > + WMI_MAX_PENDING_TX_COUNT) { + /* avoid using up memory when FW hangs */ + atomic_dec(&ar->wmi.pending_tx_count); + return -EBUSY; + } + + memset(skb_cb, 0, sizeof(*skb_cb)); + + trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len); + + status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb); + if (status) { + dev_kfree_skb_any(skb); + atomic_dec(&ar->wmi.pending_tx_count); + return status; + } + + return 0; +} + +static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data; + enum wmi_scan_event_type event_type; + enum wmi_scan_completion_reason reason; + u32 freq; + u32 req_id; + u32 scan_id; + u32 vdev_id; + + event_type = __le32_to_cpu(event->event_type); + reason = __le32_to_cpu(event->reason); + freq = __le32_to_cpu(event->channel_freq); + req_id = __le32_to_cpu(event->scan_req_id); + scan_id = __le32_to_cpu(event->scan_id); + vdev_id = __le32_to_cpu(event->vdev_id); + + ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n"); + ath10k_dbg(ATH10K_DBG_WMI, + "scan event type %d reason %d freq %d req_id %d " + "scan_id %d vdev_id %d\n", + event_type, reason, freq, req_id, scan_id, vdev_id); + + spin_lock_bh(&ar->data_lock); + + switch (event_type) { + case WMI_SCAN_EVENT_STARTED: + ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n"); + if (ar->scan.in_progress && ar->scan.is_roc) + ieee80211_ready_on_channel(ar->hw); + + complete(&ar->scan.started); + break; + case WMI_SCAN_EVENT_COMPLETED: + ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n"); + switch (reason) { + case WMI_SCAN_REASON_COMPLETED: + 
ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n"); + break; + case WMI_SCAN_REASON_CANCELLED: + ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n"); + break; + case WMI_SCAN_REASON_PREEMPTED: + ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n"); + break; + case WMI_SCAN_REASON_TIMEDOUT: + ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n"); + break; + default: + break; + } + + ar->scan_channel = NULL; + if (!ar->scan.in_progress) { + ath10k_warn("no scan requested, ignoring\n"); + break; + } + + if (ar->scan.is_roc) { + ath10k_offchan_tx_purge(ar); + + if (!ar->scan.aborting) + ieee80211_remain_on_channel_expired(ar->hw); + } else { + ieee80211_scan_completed(ar->hw, ar->scan.aborting); + } + + del_timer(&ar->scan.timeout); + complete_all(&ar->scan.completed); + ar->scan.in_progress = false; + break; + case WMI_SCAN_EVENT_BSS_CHANNEL: + ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n"); + ar->scan_channel = NULL; + break; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL: + ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n"); + ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); + if (ar->scan.in_progress && ar->scan.is_roc && + ar->scan.roc_freq == freq) { + complete(&ar->scan.on_channel); + } + break; + case WMI_SCAN_EVENT_DEQUEUED: + ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n"); + break; + case WMI_SCAN_EVENT_PREEMPTED: + ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n"); + break; + case WMI_SCAN_EVENT_START_FAILED: + ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n"); + break; + default: + break; + } + + spin_unlock_bh(&ar->data_lock); + return 0; +} + +static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode) +{ + enum ieee80211_band band; + + switch (phy_mode) { + case MODE_11A: + case MODE_11NA_HT20: + case MODE_11NA_HT40: + case MODE_11AC_VHT20: + case MODE_11AC_VHT40: + case MODE_11AC_VHT80: + band = IEEE80211_BAND_5GHZ; + break; + case MODE_11G: + case MODE_11B: + case MODE_11GONLY: + case MODE_11NG_HT20: + case MODE_11NG_HT40: + case MODE_11AC_VHT20_2G: + case MODE_11AC_VHT40_2G: + case MODE_11AC_VHT80_2G: + default: + band = IEEE80211_BAND_2GHZ; + } + + return band; +} + +static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band) +{ + u8 rate_idx = 0; + + /* rate in Kbps */ + switch (rate) { + case 1000: + rate_idx = 0; + break; + case 2000: + rate_idx = 1; + break; + case 5500: + rate_idx = 2; + break; + case 11000: + rate_idx = 3; + break; + case 6000: + rate_idx = 4; + break; + case 9000: + rate_idx = 5; + break; + case 12000: + rate_idx = 6; + break; + case 18000: + rate_idx = 7; + break; + case 24000: + rate_idx = 8; + break; + case 36000: + rate_idx = 9; + break; + case 48000: + rate_idx = 10; + break; + case 54000: + rate_idx = 11; + break; + default: + break; + } + + if (band == IEEE80211_BAND_5GHZ) { + if (rate_idx > 3) + /* Omit CCK rates */ + rate_idx -= 4; + else + rate_idx = 0; + } + + return rate_idx; +} + +static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr; + u32 rx_status; + u32 channel; + u32 phy_mode; + u32 snr; + u32 rate; + u32 buf_len; + u16 fc; + + channel = __le32_to_cpu(event->hdr.channel); + buf_len = __le32_to_cpu(event->hdr.buf_len); + rx_status = __le32_to_cpu(event->hdr.status); + snr = __le32_to_cpu(event->hdr.snr); + phy_mode = __le32_to_cpu(event->hdr.phy_mode); + rate = 
__le32_to_cpu(event->hdr.rate); + + memset(status, 0, sizeof(*status)); + + ath10k_dbg(ATH10K_DBG_MGMT, + "event mgmt rx status %08x\n", rx_status); + + if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) { + dev_kfree_skb(skb); + return 0; + } + + if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) { + dev_kfree_skb(skb); + return 0; + } + + if (rx_status & WMI_RX_STATUS_ERR_CRC) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (rx_status & WMI_RX_STATUS_ERR_MIC) + status->flag |= RX_FLAG_MMIC_ERROR; + + status->band = phy_mode_to_band(phy_mode); + status->freq = ieee80211_channel_to_frequency(channel, status->band); + status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR; + status->rate_idx = get_rate_idx(rate, status->band); + + skb_pull(skb, sizeof(event->hdr)); + + hdr = (struct ieee80211_hdr *)skb->data; + fc = le16_to_cpu(hdr->frame_control); + + if (fc & IEEE80211_FCTL_PROTECTED) { + status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED; + hdr->frame_control = __cpu_to_le16(fc & + ~IEEE80211_FCTL_PROTECTED); + } + + ath10k_dbg(ATH10K_DBG_MGMT, + "event mgmt rx skb %p len %d ftype %02x stype %02x\n", + skb, skb->len, + fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); + + ath10k_dbg(ATH10K_DBG_MGMT, + "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", + status->freq, status->band, status->signal, + status->rate_idx); + + /* + * packets from HTC come aligned to 4byte boundaries + * because they can originally come in along with a trailer + */ + skb_trim(skb, buf_len); + + ieee80211_rx(ar->hw, skb); + return 0; +} + +static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n"); +} + +static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); +} + +static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n"); +} + +static void ath10k_wmi_event_update_stats(struct ath10k *ar, + struct sk_buff *skb) +{ + struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data; + + ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); + + ath10k_debug_read_target_stats(ar, ev); +} + +static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, + struct sk_buff *skb) +{ + struct wmi_vdev_start_response_event *ev; + + ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); + + ev = (struct wmi_vdev_start_response_event *)skb->data; + + if (WARN_ON(__le32_to_cpu(ev->status))) + return; + + complete(&ar->vdev_setup_done); +} + +static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); + complete(&ar->vdev_setup_done); +} + +static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n"); +} + +/* + * FIXME + * + * We don't report to mac80211 sleep state of connected + * stations. Due to this mac80211 can't fill in TIM IE + * correctly. + * + * I know of no way of getting nullfunc frames that contain + * sleep transition from connected stations - these do not + * seem to be sent from the target to the host. There also + * doesn't seem to be a dedicated event for that. So the + * only way left to do this would be to read tim_bitmap + * during SWBA. + * + * We could probably try using tim_bitmap from SWBA to tell + * mac80211 which stations are asleep and which are not. 
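+ * (mac80211's ieee80211_sta_ps_transition() would be the natural
+ * hook for reporting sleep state, but it needs exactly the
+ * per-station transitions that the firmware does not deliver.)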
The + * problem here is calling mac80211 functions so many times + * could take too long and make us miss the time to submit + * the beacon to the target. + * + * So as a workaround we try to extend the TIM IE if there + * is unicast buffered for stations with aid > 7 and fill it + * in ourselves. + */ +static void ath10k_wmi_update_tim(struct ath10k *ar, + struct ath10k_vif *arvif, + struct sk_buff *bcn, + struct wmi_bcn_info *bcn_info) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data; + struct ieee80211_tim_ie *tim; + u8 *ies, *ie; + u8 ie_len, pvm_len; + + /* if next SWBA has no tim_changed the tim_bitmap is garbage. + * we must copy the bitmap upon change and reuse it later */ + if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) { + int i; + + BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) != + sizeof(bcn_info->tim_info.tim_bitmap)); + + for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) { + __le32 t = bcn_info->tim_info.tim_bitmap[i / 4]; + u32 v = __le32_to_cpu(t); + arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF; + } + + /* FW reports either length 0 or 16 + * so we calculate this on our own */ + arvif->u.ap.tim_len = 0; + for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) + if (arvif->u.ap.tim_bitmap[i]) + arvif->u.ap.tim_len = i; + + arvif->u.ap.tim_len++; + } + + ies = bcn->data; + ies += ieee80211_hdrlen(hdr->frame_control); + ies += 12; /* fixed parameters */ + + ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies, + (u8 *)skb_tail_pointer(bcn) - ies); + if (!ie) { + /* highly unlikely for mac80211 */ + ath10k_warn("no tim ie found;\n"); + return; + } + + tim = (void *)ie + 2; + ie_len = ie[1]; + pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */ + + if (pvm_len < arvif->u.ap.tim_len) { + int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len; + int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len); + void *next_ie = ie + 2 + ie_len; + + if (skb_put(bcn, expand_size)) { + memmove(next_ie + expand_size, next_ie, move_size); + + ie[1] += expand_size; + ie_len += expand_size; + pvm_len += expand_size; + } else { + ath10k_warn("tim expansion failed\n"); + } + } + + if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) { + ath10k_warn("tim pvm length is too great (%d)\n", pvm_len); + return; + } + + tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast); + memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len); + + ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", + tim->dtim_count, tim->dtim_period, + tim->bitmap_ctrl, pvm_len); +} + +static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len, + struct wmi_p2p_noa_info *noa) +{ + struct ieee80211_p2p_noa_attr *noa_attr; + u8 ctwindow_oppps = noa->ctwindow_oppps; + u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET; + bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT); + __le16 *noa_attr_len; + u16 attr_len; + u8 noa_descriptors = noa->num_descriptors; + int i; + + /* P2P IE */ + data[0] = WLAN_EID_VENDOR_SPECIFIC; + data[1] = len - 2; + data[2] = (WLAN_OUI_WFA >> 16) & 0xff; + data[3] = (WLAN_OUI_WFA >> 8) & 0xff; + data[4] = (WLAN_OUI_WFA >> 0) & 0xff; + data[5] = WLAN_OUI_TYPE_WFA_P2P; + + /* NOA ATTR */ + data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE; + noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */ + noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9]; + + noa_attr->index = noa->index; + noa_attr->oppps_ctwindow = ctwindow; + if (oppps) + noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT; + + for (i = 0; i < noa_descriptors; i++) { + 
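+ /* count is a single octet in the NoA attribute, so the le32 WMI
+ * value is converted to host order (and truncated on assignment);
+ * duration, interval and start_time are little-endian both in WMI
+ * and in the attribute and are copied through unchanged */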
noa_attr->desc[i].count = + __le32_to_cpu(noa->descriptors[i].type_count); + noa_attr->desc[i].duration = noa->descriptors[i].duration; + noa_attr->desc[i].interval = noa->descriptors[i].interval; + noa_attr->desc[i].start_time = noa->descriptors[i].start_time; + } + + attr_len = 2; /* index + oppps_ctwindow */ + attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc); + *noa_attr_len = __cpu_to_le16(attr_len); +} + +static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa) +{ + u32 len = 0; + u8 noa_descriptors = noa->num_descriptors; + u8 opp_ps_info = noa->ctwindow_oppps; + bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT); + + + if (!noa_descriptors && !opps_enabled) + return len; + + len += 1 + 1 + 4; /* EID + len + OUI */ + len += 1 + 2; /* noa attr + attr len */ + len += 1 + 1; /* index + oppps_ctwindow */ + len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc); + + return len; +} + +static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, + struct sk_buff *bcn, + struct wmi_bcn_info *bcn_info) +{ + struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info; + u8 *new_data, *old_data = arvif->u.ap.noa_data; + u32 new_len; + + if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) + return; + + ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); + if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) { + new_len = ath10k_p2p_calc_noa_ie_len(noa); + if (!new_len) + goto cleanup; + + new_data = kmalloc(new_len, GFP_ATOMIC); + if (!new_data) + goto cleanup; + + ath10k_p2p_fill_noa_ie(new_data, new_len, noa); + + spin_lock_bh(&ar->data_lock); + arvif->u.ap.noa_data = new_data; + arvif->u.ap.noa_len = new_len; + spin_unlock_bh(&ar->data_lock); + kfree(old_data); + } + + if (arvif->u.ap.noa_data) + if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC)) + memcpy(skb_put(bcn, arvif->u.ap.noa_len), + arvif->u.ap.noa_data, + arvif->u.ap.noa_len); + return; + +cleanup: + spin_lock_bh(&ar->data_lock); + arvif->u.ap.noa_data = NULL; + arvif->u.ap.noa_len = 0; + spin_unlock_bh(&ar->data_lock); + kfree(old_data); +} + + +static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_host_swba_event *ev; + u32 map; + int i = -1; + struct wmi_bcn_info *bcn_info; + struct ath10k_vif *arvif; + struct wmi_bcn_tx_arg arg; + struct sk_buff *bcn; + int vdev_id = 0; + int ret; + + ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n"); + + ev = (struct wmi_host_swba_event *)skb->data; + map = __le32_to_cpu(ev->vdev_map); + + ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n" + "-vdev map 0x%x\n", + ev->vdev_map); + + for (; map; map >>= 1, vdev_id++) { + if (!(map & 0x1)) + continue; + + i++; + + if (i >= WMI_MAX_AP_VDEV) { + ath10k_warn("swba has corrupted vdev map\n"); + break; + } + + bcn_info = &ev->bcn_info[i]; + + ath10k_dbg(ATH10K_DBG_MGMT, + "-bcn_info[%d]:\n" + "--tim_len %d\n" + "--tim_mcast %d\n" + "--tim_changed %d\n" + "--tim_num_ps_pending %d\n" + "--tim_bitmap 0x%08x%08x%08x%08x\n", + i, + __le32_to_cpu(bcn_info->tim_info.tim_len), + __le32_to_cpu(bcn_info->tim_info.tim_mcast), + __le32_to_cpu(bcn_info->tim_info.tim_changed), + __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending), + __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]), + __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]), + __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]), + __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0])); + + arvif = ath10k_get_arvif(ar, vdev_id); + if (arvif == NULL) { + ath10k_warn("no vif for vdev_id %d found\n", 
vdev_id); + continue; + } + + bcn = ieee80211_beacon_get(ar->hw, arvif->vif); + if (!bcn) { + ath10k_warn("could not get mac80211 beacon\n"); + continue; + } + + ath10k_tx_h_seq_no(bcn); + ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info); + ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info); + + arg.vdev_id = arvif->vdev_id; + arg.tx_rate = 0; + arg.tx_power = 0; + arg.bcn = bcn->data; + arg.bcn_len = bcn->len; + + ret = ath10k_wmi_beacon_send(ar, &arg); + if (ret) + ath10k_warn("could not send beacon (%d)\n", ret); + + dev_kfree_skb_any(bcn); + } +} + +static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); +} + +static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n"); +} + +static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); +} + +static void ath10k_wmi_event_profile_match(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); +} + +static void ath10k_wmi_event_debug_print(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n"); +} + +static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); +} + +static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); +} + +static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); +} + +static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); +} + +static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); +} + +static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); +} + +static void ath10k_wmi_event_dcs_interference(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); +} + +static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); +} + +static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); +} + +static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); +} + +static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); +} + +static void ath10k_wmi_event_delba_complete(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); +} + +static void ath10k_wmi_event_addba_complete(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); +} + +static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); 
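+ /* like the handlers above that only emit a debug message, this is
+ * a stub: the event is acknowledged but nothing is done with it
+ * yet */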
+} + +static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, + struct sk_buff *skb) +{ + struct wmi_service_ready_event *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) { + ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", + skb->len, sizeof(*ev)); + return; + } + + ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); + ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); + ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); + ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); + ar->fw_version_major = + (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; + ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); + ar->fw_version_release = + (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16; + ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff); + ar->phy_capability = __le32_to_cpu(ev->phy_capability); + + ar->ath_common.regulatory.current_rd = + __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); + + ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, + sizeof(ev->wmi_service_bitmap)); + + if (strlen(ar->hw->wiphy->fw_version) == 0) { + snprintf(ar->hw->wiphy->fw_version, + sizeof(ar->hw->wiphy->fw_version), + "%u.%u.%u.%u", + ar->fw_version_major, + ar->fw_version_minor, + ar->fw_version_release, + ar->fw_version_build); + } + + /* FIXME: it probably should be better to support this */ + if (__le32_to_cpu(ev->num_mem_reqs) > 0) { + ath10k_warn("target requested %d memory chunks; ignoring\n", + __le32_to_cpu(ev->num_mem_reqs)); + } + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n", + __le32_to_cpu(ev->sw_version), + __le32_to_cpu(ev->sw_version_1), + __le32_to_cpu(ev->abi_version), + __le32_to_cpu(ev->phy_capability), + __le32_to_cpu(ev->ht_cap_info), + __le32_to_cpu(ev->vht_cap_info), + __le32_to_cpu(ev->vht_supp_mcs), + __le32_to_cpu(ev->sys_cap_info), + __le32_to_cpu(ev->num_mem_reqs)); + + complete(&ar->wmi.service_ready); +} + +static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; + + if (WARN_ON(skb->len < sizeof(*ev))) + return -EINVAL; + + memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n", + __le32_to_cpu(ev->sw_version), + __le32_to_cpu(ev->abi_version), + ev->mac_addr.addr, + __le32_to_cpu(ev->status)); + + complete(&ar->wmi.unified_ready); + return 0; +} + +static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_event_id id; + u16 len; + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); + + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) + return; + + len = skb->len; + + trace_ath10k_wmi_event(id, skb->data, skb->len); + + switch (id) { + case WMI_MGMT_RX_EVENTID: + ath10k_wmi_event_mgmt_rx(ar, skb); + /* mgmt_rx() owns the skb now! 
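+ * -- the handler passes it to mac80211 via ieee80211_rx() (or
+ * frees it on an error path), so it must not be freed here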
*/ + return; + case WMI_SCAN_EVENTID: + ath10k_wmi_event_scan(ar, skb); + break; + case WMI_CHAN_INFO_EVENTID: + ath10k_wmi_event_chan_info(ar, skb); + break; + case WMI_ECHO_EVENTID: + ath10k_wmi_event_echo(ar, skb); + break; + case WMI_DEBUG_MESG_EVENTID: + ath10k_wmi_event_debug_mesg(ar, skb); + break; + case WMI_UPDATE_STATS_EVENTID: + ath10k_wmi_event_update_stats(ar, skb); + break; + case WMI_VDEV_START_RESP_EVENTID: + ath10k_wmi_event_vdev_start_resp(ar, skb); + break; + case WMI_VDEV_STOPPED_EVENTID: + ath10k_wmi_event_vdev_stopped(ar, skb); + break; + case WMI_PEER_STA_KICKOUT_EVENTID: + ath10k_wmi_event_peer_sta_kickout(ar, skb); + break; + case WMI_HOST_SWBA_EVENTID: + ath10k_wmi_event_host_swba(ar, skb); + break; + case WMI_TBTTOFFSET_UPDATE_EVENTID: + ath10k_wmi_event_tbttoffset_update(ar, skb); + break; + case WMI_PHYERR_EVENTID: + ath10k_wmi_event_phyerr(ar, skb); + break; + case WMI_ROAM_EVENTID: + ath10k_wmi_event_roam(ar, skb); + break; + case WMI_PROFILE_MATCH: + ath10k_wmi_event_profile_match(ar, skb); + break; + case WMI_DEBUG_PRINT_EVENTID: + ath10k_wmi_event_debug_print(ar, skb); + break; + case WMI_PDEV_QVIT_EVENTID: + ath10k_wmi_event_pdev_qvit(ar, skb); + break; + case WMI_WLAN_PROFILE_DATA_EVENTID: + ath10k_wmi_event_wlan_profile_data(ar, skb); + break; + case WMI_RTT_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_rtt_measurement_report(ar, skb); + break; + case WMI_TSF_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_tsf_measurement_report(ar, skb); + break; + case WMI_RTT_ERROR_REPORT_EVENTID: + ath10k_wmi_event_rtt_error_report(ar, skb); + break; + case WMI_WOW_WAKEUP_HOST_EVENTID: + ath10k_wmi_event_wow_wakeup_host(ar, skb); + break; + case WMI_DCS_INTERFERENCE_EVENTID: + ath10k_wmi_event_dcs_interference(ar, skb); + break; + case WMI_PDEV_TPC_CONFIG_EVENTID: + ath10k_wmi_event_pdev_tpc_config(ar, skb); + break; + case WMI_PDEV_FTM_INTG_EVENTID: + ath10k_wmi_event_pdev_ftm_intg(ar, skb); + break; + case WMI_GTK_OFFLOAD_STATUS_EVENTID: + ath10k_wmi_event_gtk_offload_status(ar, skb); + break; + case WMI_GTK_REKEY_FAIL_EVENTID: + ath10k_wmi_event_gtk_rekey_fail(ar, skb); + break; + case WMI_TX_DELBA_COMPLETE_EVENTID: + ath10k_wmi_event_delba_complete(ar, skb); + break; + case WMI_TX_ADDBA_COMPLETE_EVENTID: + ath10k_wmi_event_addba_complete(ar, skb); + break; + case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: + ath10k_wmi_event_vdev_install_key_complete(ar, skb); + break; + case WMI_SERVICE_READY_EVENTID: + ath10k_wmi_service_ready_event_rx(ar, skb); + break; + case WMI_READY_EVENTID: + ath10k_wmi_ready_event_rx(ar, skb); + break; + default: + ath10k_warn("Unknown eventid: %d\n", id); + break; + } + + dev_kfree_skb(skb); +} + +static void ath10k_wmi_event_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, + wmi.wmi_event_work); + struct sk_buff *skb; + + for (;;) { + skb = skb_dequeue(&ar->wmi.wmi_event_list); + if (!skb) + break; + + ath10k_wmi_event_process(ar, skb); + } +} + +static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + enum wmi_event_id event_id; + + event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); + + /* some events require to be handled ASAP + * thus can't be defered to a worker thread */ + switch (event_id) { + case WMI_HOST_SWBA_EVENTID: + case WMI_MGMT_RX_EVENTID: + ath10k_wmi_event_process(ar, skb); + return; + default: + break; + } + + skb_queue_tail(&ar->wmi.wmi_event_list, skb); + 
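+ /* the skb is queued before the worker is kicked so that
+ * ath10k_wmi_event_work() always finds it; deferred events are
+ * processed in arrival (FIFO) order */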
queue_work(ar->workqueue, &ar->wmi.wmi_event_work); +} + +/* WMI Initialization functions */ +int ath10k_wmi_attach(struct ath10k *ar) +{ + init_completion(&ar->wmi.service_ready); + init_completion(&ar->wmi.unified_ready); + init_waitqueue_head(&ar->wmi.wq); + + skb_queue_head_init(&ar->wmi.wmi_event_list); + INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work); + + return 0; +} + +void ath10k_wmi_detach(struct ath10k *ar) +{ + /* HTC should've drained the packets already */ + if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0)) + ath10k_warn("there are still pending packets\n"); + + cancel_work_sync(&ar->wmi.wmi_event_work); + skb_queue_purge(&ar->wmi.wmi_event_list); +} + +int ath10k_wmi_connect_htc_service(struct ath10k *ar) +{ + int status; + struct ath10k_htc_svc_conn_req conn_req; + struct ath10k_htc_svc_conn_resp conn_resp; + + memset(&conn_req, 0, sizeof(conn_req)); + memset(&conn_resp, 0, sizeof(conn_resp)); + + /* these fields are the same for all service endpoints */ + conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete; + conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx; + + /* connect to control service */ + conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; + + status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp); + if (status) { + ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", + status); + return status; + } + + ar->wmi.eid = conn_resp.eid; + return 0; +} + +int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, + u16 rd5g, u16 ctl2g, u16 ctl5g) +{ + struct wmi_pdev_set_regdomain_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; + cmd->reg_domain = __cpu_to_le32(rd); + cmd->reg_domain_2G = __cpu_to_le32(rd2g); + cmd->reg_domain_5G = __cpu_to_le32(rd5g); + cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); + cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", + rd, rd2g, rd5g, ctl2g, ctl5g); + + return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); +} + +int ath10k_wmi_pdev_set_channel(struct ath10k *ar, + const struct wmi_channel_arg *arg) +{ + struct wmi_set_channel_cmd *cmd; + struct sk_buff *skb; + + if (arg->passive) + return -EINVAL; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_channel_cmd *)skb->data; + cmd->chan.mhz = __cpu_to_le32(arg->freq); + cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq); + cmd->chan.mode = arg->mode; + cmd->chan.min_power = arg->min_power; + cmd->chan.max_power = arg->max_power; + cmd->chan.reg_power = arg->max_reg_power; + cmd->chan.reg_classid = arg->reg_class_id; + cmd->chan.antenna_max = arg->max_antenna_gain; + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi set channel mode %d freq %d\n", + arg->mode, arg->freq); + + return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID); +} + +int ath10k_wmi_pdev_suspend_target(struct ath10k *ar) +{ + struct wmi_pdev_suspend_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_suspend_cmd *)skb->data; + cmd->suspend_opt = WMI_PDEV_SUSPEND; + + return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID); +} + +int ath10k_wmi_pdev_resume_target(struct ath10k *ar) +{ + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(0); + if (skb == NULL) + return -ENOMEM; 
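+ /* WMI_PDEV_RESUME_CMDID carries no payload, hence the zero-length
+ * allocation; ath10k_wmi_alloc_skb() still reserves
+ * WMI_SKB_HEADROOM, into which ath10k_wmi_cmd_send() pushes the
+ * wmi_cmd_hdr */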
+ + return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID); +} + +int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, + u32 value) +{ + struct wmi_pdev_set_param_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_param_cmd *)skb->data; + cmd->param_id = __cpu_to_le32(id); + cmd->param_value = __cpu_to_le32(value); + + ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", + id, value); + return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID); +} + +int ath10k_wmi_cmd_init(struct ath10k *ar) +{ + struct wmi_init_cmd *cmd; + struct sk_buff *buf; + struct wmi_resource_config config = {}; + u32 val; + + config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS); + config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS); + config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS); + + config.num_offload_reorder_bufs = + __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS); + + config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS); + config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS); + config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT); + config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK); + config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK); + config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI); + config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE); + + config.scan_max_pending_reqs = + __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS); + + config.bmiss_offload_max_vdev = + __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_vdev = + __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_ap_profiles = + __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES); + + config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS); + config.num_mcast_table_elems = + __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS); + + config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE); + config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE); + config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES); + config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE); + config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM); + + val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; + config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); + + config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG); + + config.gtk_offload_max_vdev = + __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV); + + config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC); + config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES); + + buf = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!buf) + return -ENOMEM; + + cmd = (struct wmi_init_cmd *)buf->data; + cmd->num_host_mem_chunks = 0; + memcpy(&cmd->resource_config, &config, sizeof(config)); + + ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); + return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID); +} + +static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg) +{ + int len; + + len = sizeof(struct wmi_start_scan_cmd); + + if (arg->ie_len) { + if (!arg->ie) + return -EINVAL; + if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) + return -EINVAL; + + len += sizeof(struct wmi_ie_data); + len += roundup(arg->ie_len, 4); + } + + 
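+ /* worked example for a hypothetical request with ie_len = 9,
+ * 2 channels, 1 SSID and 1 BSSID:
+ * len = sizeof(cmd)
+ * + sizeof(struct wmi_ie_data) + 12 (9 rounded up to 4)
+ * + sizeof(struct wmi_chan_list) + 2 * sizeof(__le32)
+ * + sizeof(struct wmi_ssid_list) + sizeof(struct wmi_ssid)
+ * + sizeof(struct wmi_bssid_list) + sizeof(struct wmi_mac_addr)
+ * which must match the offsets used when the TLVs are packed in
+ * ath10k_wmi_start_scan() */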
if (arg->n_channels) { + if (!arg->channels) + return -EINVAL; + if (arg->n_channels > ARRAY_SIZE(arg->channels)) + return -EINVAL; + + len += sizeof(struct wmi_chan_list); + len += sizeof(__le32) * arg->n_channels; + } + + if (arg->n_ssids) { + if (!arg->ssids) + return -EINVAL; + if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) + return -EINVAL; + + len += sizeof(struct wmi_ssid_list); + len += sizeof(struct wmi_ssid) * arg->n_ssids; + } + + if (arg->n_bssids) { + if (!arg->bssids) + return -EINVAL; + if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID) + return -EINVAL; + + len += sizeof(struct wmi_bssid_list); + len += sizeof(struct wmi_mac_addr) * arg->n_bssids; + } + + return len; +} + +int ath10k_wmi_start_scan(struct ath10k *ar, + const struct wmi_start_scan_arg *arg) +{ + struct wmi_start_scan_cmd *cmd; + struct sk_buff *skb; + struct wmi_ie_data *ie; + struct wmi_chan_list *channels; + struct wmi_ssid_list *ssids; + struct wmi_bssid_list *bssids; + u32 scan_id; + u32 scan_req_id; + int off; + int len = 0; + int i; + + len = ath10k_wmi_start_scan_calc_len(arg); + if (len < 0) + return len; /* len contains error code here */ + + skb = ath10k_wmi_alloc_skb(len); + if (!skb) + return -ENOMEM; + + scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX; + scan_id |= arg->scan_id; + + scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; + scan_req_id |= arg->scan_req_id; + + cmd = (struct wmi_start_scan_cmd *)skb->data; + cmd->scan_id = __cpu_to_le32(scan_id); + cmd->scan_req_id = __cpu_to_le32(scan_req_id); + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->scan_priority = __cpu_to_le32(arg->scan_priority); + cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events); + cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active); + cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive); + cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time); + cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time); + cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time); + cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time); + cmd->idle_time = __cpu_to_le32(arg->idle_time); + cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time); + cmd->probe_delay = __cpu_to_le32(arg->probe_delay); + cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); + + /* TLV list starts after fields included in the struct */ + off = sizeof(*cmd); + + if (arg->n_channels) { + channels = (void *)skb->data + off; + channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG); + channels->num_chan = __cpu_to_le32(arg->n_channels); + + for (i = 0; i < arg->n_channels; i++) + channels->channel_list[i] = + __cpu_to_le32(arg->channels[i]); + + off += sizeof(*channels); + off += sizeof(__le32) * arg->n_channels; + } + + if (arg->n_ssids) { + ssids = (void *)skb->data + off; + ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG); + ssids->num_ssids = __cpu_to_le32(arg->n_ssids); + + for (i = 0; i < arg->n_ssids; i++) { + ssids->ssids[i].ssid_len = + __cpu_to_le32(arg->ssids[i].len); + memcpy(&ssids->ssids[i].ssid, + arg->ssids[i].ssid, + arg->ssids[i].len); + } + + off += sizeof(*ssids); + off += sizeof(struct wmi_ssid) * arg->n_ssids; + } + + if (arg->n_bssids) { + bssids = (void *)skb->data + off; + bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG); + bssids->num_bssid = __cpu_to_le32(arg->n_bssids); + + for (i = 0; i < arg->n_bssids; i++) + memcpy(&bssids->bssid_list[i], + arg->bssids[i].bssid, + ETH_ALEN); + + off += sizeof(*bssids); + off += sizeof(struct wmi_mac_addr) * arg->n_bssids; + } + + if (arg->ie_len) { + ie = (void 
*)skb->data + off; + ie->tag = __cpu_to_le32(WMI_IE_TAG); + ie->ie_len = __cpu_to_le32(arg->ie_len); + memcpy(ie->ie_data, arg->ie, arg->ie_len); + + off += sizeof(*ie); + off += roundup(arg->ie_len, 4); + } + + if (off != skb->len) { + dev_kfree_skb(skb); + return -EINVAL; + } + + ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n"); + return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID); +} + +void ath10k_wmi_start_scan_init(struct ath10k *ar, + struct wmi_start_scan_arg *arg) +{ + /* setup commonly used values */ + arg->scan_req_id = 1; + arg->scan_priority = WMI_SCAN_PRIORITY_LOW; + arg->dwell_time_active = 50; + arg->dwell_time_passive = 150; + arg->min_rest_time = 50; + arg->max_rest_time = 500; + arg->repeat_probe_time = 0; + arg->probe_spacing_time = 0; + arg->idle_time = 0; + arg->max_scan_time = 5000; + arg->probe_delay = 5; + arg->notify_scan_events = WMI_SCAN_EVENT_STARTED + | WMI_SCAN_EVENT_COMPLETED + | WMI_SCAN_EVENT_BSS_CHANNEL + | WMI_SCAN_EVENT_FOREIGN_CHANNEL + | WMI_SCAN_EVENT_DEQUEUED; + arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; + arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; + arg->n_bssids = 1; + arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; +} + +int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) +{ + struct wmi_stop_scan_cmd *cmd; + struct sk_buff *skb; + u32 scan_id; + u32 req_id; + + if (arg->req_id > 0xFFF) + return -EINVAL; + if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) + return -EINVAL; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + scan_id = arg->u.scan_id; + scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX; + + req_id = arg->req_id; + req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; + + cmd = (struct wmi_stop_scan_cmd *)skb->data; + cmd->req_type = __cpu_to_le32(arg->req_type); + cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id); + cmd->scan_id = __cpu_to_le32(scan_id); + cmd->scan_req_id = __cpu_to_le32(req_id); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", + arg->req_id, arg->req_type, arg->u.scan_id); + return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID); +} + +int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, + enum wmi_vdev_type type, + enum wmi_vdev_subtype subtype, + const u8 macaddr[ETH_ALEN]) +{ + struct wmi_vdev_create_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_create_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->vdev_type = __cpu_to_le32(type); + cmd->vdev_subtype = __cpu_to_le32(subtype); + memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN); + + ath10k_dbg(ATH10K_DBG_WMI, + "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", + vdev_id, type, subtype, macaddr); + + return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID); +} + +int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) +{ + struct wmi_vdev_delete_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_delete_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + + ath10k_dbg(ATH10K_DBG_WMI, + "WMI vdev delete id %d\n", vdev_id); + + return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID); +} + +static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *arg, + enum wmi_cmd_id cmd_id) +{ + struct wmi_vdev_start_request_cmd *cmd; + struct sk_buff *skb; + const char *cmdname; + u32 flags = 0; + + if 
(cmd_id != WMI_VDEV_START_REQUEST_CMDID && + cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID) + return -EINVAL; + if (WARN_ON(arg->ssid && arg->ssid_len == 0)) + return -EINVAL; + if (WARN_ON(arg->hidden_ssid && !arg->ssid)) + return -EINVAL; + if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) + return -EINVAL; + + if (cmd_id == WMI_VDEV_START_REQUEST_CMDID) + cmdname = "start"; + else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID) + cmdname = "restart"; + else + return -EINVAL; /* should not happen, we already check cmd_id */ + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + if (arg->hidden_ssid) + flags |= WMI_VDEV_START_HIDDEN_SSID; + if (arg->pmf_enabled) + flags |= WMI_VDEV_START_PMF_ENABLED; + + cmd = (struct wmi_vdev_start_request_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack); + cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval); + cmd->dtim_period = __cpu_to_le32(arg->dtim_period); + cmd->flags = __cpu_to_le32(flags); + cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate); + cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power); + + if (arg->ssid) { + cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len); + memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); + } + + cmd->chan.mhz = __cpu_to_le32(arg->channel.freq); + + cmd->chan.band_center_freq1 = + __cpu_to_le32(arg->channel.band_center_freq1); + + cmd->chan.mode = arg->channel.mode; + cmd->chan.min_power = arg->channel.min_power; + cmd->chan.max_power = arg->channel.max_power; + cmd->chan.reg_power = arg->channel.max_reg_power; + cmd->chan.reg_classid = arg->channel.reg_class_id; + cmd->chan.antenna_max = arg->channel.max_antenna_gain; + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X," + "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq, + arg->channel.mode, flags, arg->channel.max_power); + + return ath10k_wmi_cmd_send(ar, skb, cmd_id); +} + +int ath10k_wmi_vdev_start(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *arg) +{ + return ath10k_wmi_vdev_start_restart(ar, arg, + WMI_VDEV_START_REQUEST_CMDID); +} + +int ath10k_wmi_vdev_restart(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *arg) +{ + return ath10k_wmi_vdev_start_restart(ar, arg, + WMI_VDEV_RESTART_REQUEST_CMDID); +} + +int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) +{ + struct wmi_vdev_stop_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_stop_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + + ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); + + return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID); +} + +int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) +{ + struct wmi_vdev_up_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_up_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->vdev_assoc_id = __cpu_to_le32(aid); + memcpy(&cmd->vdev_bssid.addr, bssid, 6); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", + vdev_id, aid, bssid); + + return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID); +} + +int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) +{ + struct wmi_vdev_down_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct 
wmi_vdev_down_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi mgmt vdev down id 0x%x\n", vdev_id); + + return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID); +} + +int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, + enum wmi_vdev_param param_id, u32 param_value) +{ + struct wmi_vdev_set_param_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_set_param_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->param_id = __cpu_to_le32(param_id); + cmd->param_value = __cpu_to_le32(param_value); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi vdev id 0x%x set param %d value %d\n", + vdev_id, param_id, param_value); + + return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID); +} + +int ath10k_wmi_vdev_install_key(struct ath10k *ar, + const struct wmi_vdev_install_key_arg *arg) +{ + struct wmi_vdev_install_key_cmd *cmd; + struct sk_buff *skb; + + if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL) + return -EINVAL; + if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) + return -EINVAL; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_install_key_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->key_idx = __cpu_to_le32(arg->key_idx); + cmd->key_flags = __cpu_to_le32(arg->key_flags); + cmd->key_cipher = __cpu_to_le32(arg->key_cipher); + cmd->key_len = __cpu_to_le32(arg->key_len); + cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len); + cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); + + if (arg->macaddr) + memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN); + if (arg->key_data) + memcpy(cmd->key_data, arg->key_data, arg->key_len); + + return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID); +} + +int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN]) +{ + struct wmi_peer_create_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_peer_create_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi peer create vdev_id %d peer_addr %pM\n", + vdev_id, peer_addr); + return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID); +} + +int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN]) +{ + struct wmi_peer_delete_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_peer_delete_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi peer delete vdev_id %d peer_addr %pM\n", + vdev_id, peer_addr); + return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID); +} + +int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) +{ + struct wmi_peer_flush_tids_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); + memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", + vdev_id, 
peer_addr, tid_bitmap); + return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID); +} + +int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, + const u8 *peer_addr, enum wmi_peer_param param_id, + u32 param_value) +{ + struct wmi_peer_set_param_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_peer_set_param_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->param_id = __cpu_to_le32(param_id); + cmd->param_value = __cpu_to_le32(param_value); + memcpy(&cmd->peer_macaddr.addr, peer_addr, 6); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi vdev %d peer 0x%pM set param %d value %d\n", + vdev_id, peer_addr, param_id, param_value); + + return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID); +} + +int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_ps_mode psmode) +{ + struct wmi_sta_powersave_mode_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->sta_ps_mode = __cpu_to_le32(psmode); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi set powersave id 0x%x mode %d\n", + vdev_id, psmode); + + return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID); +} + +int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_powersave_param param_id, + u32 value) +{ + struct wmi_sta_powersave_param_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->param_id = __cpu_to_le32(param_id); + cmd->param_value = __cpu_to_le32(value); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi sta ps param vdev_id 0x%x param %d value %d\n", + vdev_id, param_id, value); + return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID); +} + +int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, + enum wmi_ap_ps_peer_param param_id, u32 value) +{ + struct wmi_ap_ps_peer_cmd *cmd; + struct sk_buff *skb; + + if (!mac) + return -EINVAL; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->param_id = __cpu_to_le32(param_id); + cmd->param_value = __cpu_to_le32(value); + memcpy(&cmd->peer_macaddr, mac, ETH_ALEN); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", + vdev_id, param_id, value, mac); + + return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID); +} + +int ath10k_wmi_scan_chan_list(struct ath10k *ar, + const struct wmi_scan_chan_list_arg *arg) +{ + struct wmi_scan_chan_list_cmd *cmd; + struct sk_buff *skb; + struct wmi_channel_arg *ch; + struct wmi_channel *ci; + int len; + int i; + + len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel); + + skb = ath10k_wmi_alloc_skb(len); + if (!skb) + return -EINVAL; + + cmd = (struct wmi_scan_chan_list_cmd *)skb->data; + cmd->num_scan_chans = __cpu_to_le32(arg->n_channels); + + for (i = 0; i < arg->n_channels; i++) { + u32 flags = 0; + + ch = &arg->channels[i]; + ci = &cmd->chan_info[i]; + + if (ch->passive) + flags |= WMI_CHAN_FLAG_PASSIVE; + if (ch->allow_ibss) + flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED; + if (ch->allow_ht) + flags |= WMI_CHAN_FLAG_ALLOW_HT; + if (ch->allow_vht) + flags |= WMI_CHAN_FLAG_ALLOW_VHT; + if 
(ch->ht40plus) + flags |= WMI_CHAN_FLAG_HT40_PLUS; + + ci->mhz = __cpu_to_le32(ch->freq); + ci->band_center_freq1 = __cpu_to_le32(ch->freq); + ci->band_center_freq2 = 0; + ci->min_power = ch->min_power; + ci->max_power = ch->max_power; + ci->reg_power = ch->max_reg_power; + ci->antenna_max = ch->max_antenna_gain; + ci->antenna_max = 0; + + /* mode & flags share storage */ + ci->mode = ch->mode; + ci->flags |= __cpu_to_le32(flags); + } + + return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID); +} + +int ath10k_wmi_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_peer_assoc_complete_cmd *cmd; + struct sk_buff *skb; + + if (arg->peer_mpdu_density > 16) + return -EINVAL; + if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) + return -EINVAL; + if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) + return -EINVAL; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1); + cmd->peer_associd = __cpu_to_le32(arg->peer_aid); + cmd->peer_flags = __cpu_to_le32(arg->peer_flags); + cmd->peer_caps = __cpu_to_le32(arg->peer_caps); + cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval); + cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps); + cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu); + cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density); + cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps); + cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams); + cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps); + cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode); + + memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN); + + cmd->peer_legacy_rates.num_rates = + __cpu_to_le32(arg->peer_legacy_rates.num_rates); + memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates, + arg->peer_legacy_rates.num_rates); + + cmd->peer_ht_rates.num_rates = + __cpu_to_le32(arg->peer_ht_rates.num_rates); + memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates, + arg->peer_ht_rates.num_rates); + + cmd->peer_vht_rates.rx_max_rate = + __cpu_to_le32(arg->peer_vht_rates.rx_max_rate); + cmd->peer_vht_rates.rx_mcs_set = + __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set); + cmd->peer_vht_rates.tx_max_rate = + __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); + cmd->peer_vht_rates.tx_mcs_set = + __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); + + return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID); +} + +int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg) +{ + struct wmi_bcn_tx_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_bcn_tx_cmd *)skb->data; + cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate); + cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power); + cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len); + memcpy(cmd->bcn, arg->bcn, arg->bcn_len); + + return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID); +} + +static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, + const struct wmi_wmm_params_arg *arg) +{ + params->cwmin = __cpu_to_le32(arg->cwmin); + params->cwmax = __cpu_to_le32(arg->cwmax); + params->aifs = __cpu_to_le32(arg->aifs); + params->txop = __cpu_to_le32(arg->txop); + params->acm = __cpu_to_le32(arg->acm); + params->no_ack = 
__cpu_to_le32(arg->no_ack); +} + +int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, + const struct wmi_pdev_set_wmm_params_arg *arg) +{ + struct wmi_pdev_set_wmm_params *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_wmm_params *)skb->data; + ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be); + ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk); + ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); + ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); + + ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); + return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID); +} + +int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) +{ + struct wmi_request_stats_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_request_stats_cmd *)skb->data; + cmd->stats_id = __cpu_to_le32(stats_id); + + ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); + return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID); +} diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h new file mode 100644 index 000000000000..9555f5a0e041 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -0,0 +1,3052 @@ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WMI_H_ +#define _WMI_H_ + +#include <linux/types.h> +#include <net/mac80211.h> + +/* + * This file specifies the WMI interface for the Unified Software + * Architecture. + * + * It includes definitions of all the commands and events. Commands are + * messages from the host to the target. Events and Replies are messages + * from the target to the host. + * + * Ownership of correctness in regards to WMI commands belongs to the host + * driver and the target is not required to validate parameters for value, + * proper range, or any other checking. + * + * Guidelines for extending this interface are below. + * + * 1. Add new WMI commands ONLY within the specified range - 0x9000 - 0x9fff + * + * 2. Use ONLY u32 type for defining member variables within WMI + * command/event structures. Do not use u8, u16, bool or + * enum types within these structures. + * + * 3. DO NOT define bit fields within structures. Implement bit fields + * using masks if necessary. Do not use the programming language's bit + * field definition. + * + * 4. Define macros for encode/decode of u8, u16 fields within + * the u32 variables. Use these macros for set/get of these fields. + * Try to use this to optimize the structure without bloating it with + * u32 variables for every lower sized field. + * + * 5. 
Do not use PACK/UNPACK attributes for the structures as each member + * variable is already 4-byte aligned by virtue of being a u32 + * type. + * + * 6. Comment each parameter part of the WMI command/event structure by + * using the 2 stars at the begining of C comment instead of one star to + * enable HTML document generation using Doxygen. + * + */ + +/* Control Path */ +struct wmi_cmd_hdr { + __le32 cmd_id; +} __packed; + +#define WMI_CMD_HDR_CMD_ID_MASK 0x00FFFFFF +#define WMI_CMD_HDR_CMD_ID_LSB 0 +#define WMI_CMD_HDR_PLT_PRIV_MASK 0xFF000000 +#define WMI_CMD_HDR_PLT_PRIV_LSB 24 + +#define HTC_PROTOCOL_VERSION 0x0002 +#define WMI_PROTOCOL_VERSION 0x0002 + +enum wmi_service_id { + WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */ + WMI_SERVICE_SCAN_OFFLOAD, /* scan offload */ + WMI_SERVICE_ROAM_OFFLOAD, /* roam offload */ + WMI_SERVICE_BCN_MISS_OFFLOAD, /* beacon miss offload */ + WMI_SERVICE_STA_PWRSAVE, /* fake sleep + basic power save */ + WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */ + WMI_SERVICE_AP_UAPSD, /* uapsd on AP */ + WMI_SERVICE_AP_DFS, /* DFS on AP */ + WMI_SERVICE_11AC, /* supports 11ac */ + WMI_SERVICE_BLOCKACK, /* Supports triggering ADDBA/DELBA from host*/ + WMI_SERVICE_PHYERR, /* PHY error */ + WMI_SERVICE_BCN_FILTER, /* Beacon filter support */ + WMI_SERVICE_RTT, /* RTT (round trip time) support */ + WMI_SERVICE_RATECTRL, /* Rate-control */ + WMI_SERVICE_WOW, /* WOW Support */ + WMI_SERVICE_RATECTRL_CACHE, /* Rate-control caching */ + WMI_SERVICE_IRAM_TIDS, /* TIDs in IRAM */ + WMI_SERVICE_ARPNS_OFFLOAD, /* ARP NS Offload support */ + WMI_SERVICE_NLO, /* Network list offload service */ + WMI_SERVICE_GTK_OFFLOAD, /* GTK offload */ + WMI_SERVICE_SCAN_SCH, /* Scan Scheduler Service */ + WMI_SERVICE_CSA_OFFLOAD, /* CSA offload service */ + WMI_SERVICE_CHATTER, /* Chatter service */ + WMI_SERVICE_COEX_FREQAVOID, /* FW report freq range to avoid */ + WMI_SERVICE_PACKET_POWER_SAVE, /* packet power save service */ + WMI_SERVICE_FORCE_FW_HANG, /* To test fw recovery mechanism */ + WMI_SERVICE_GPIO, /* GPIO service */ + WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */ + WMI_STA_UAPSD_BASIC_AUTO_TRIG, /* UAPSD AC Trigger Generation */ + WMI_STA_UAPSD_VAR_AUTO_TRIG, /* -do- */ + WMI_SERVICE_STA_KEEP_ALIVE, /* STA keep alive mechanism support */ + WMI_SERVICE_TX_ENCAP, /* Packet type for TX encapsulation */ + + WMI_SERVICE_LAST, + WMI_MAX_SERVICE = 64 /* max service */ +}; + +static inline char *wmi_service_name(int service_id) +{ + switch (service_id) { + case WMI_SERVICE_BEACON_OFFLOAD: + return "BEACON_OFFLOAD"; + case WMI_SERVICE_SCAN_OFFLOAD: + return "SCAN_OFFLOAD"; + case WMI_SERVICE_ROAM_OFFLOAD: + return "ROAM_OFFLOAD"; + case WMI_SERVICE_BCN_MISS_OFFLOAD: + return "BCN_MISS_OFFLOAD"; + case WMI_SERVICE_STA_PWRSAVE: + return "STA_PWRSAVE"; + case WMI_SERVICE_STA_ADVANCED_PWRSAVE: + return "STA_ADVANCED_PWRSAVE"; + case WMI_SERVICE_AP_UAPSD: + return "AP_UAPSD"; + case WMI_SERVICE_AP_DFS: + return "AP_DFS"; + case WMI_SERVICE_11AC: + return "11AC"; + case WMI_SERVICE_BLOCKACK: + return "BLOCKACK"; + case WMI_SERVICE_PHYERR: + return "PHYERR"; + case WMI_SERVICE_BCN_FILTER: + return "BCN_FILTER"; + case WMI_SERVICE_RTT: + return "RTT"; + case WMI_SERVICE_RATECTRL: + return "RATECTRL"; + case WMI_SERVICE_WOW: + return "WOW"; + case WMI_SERVICE_RATECTRL_CACHE: + return "RATECTRL CACHE"; + case WMI_SERVICE_IRAM_TIDS: + return "IRAM TIDS"; + case WMI_SERVICE_ARPNS_OFFLOAD: + return "ARPNS_OFFLOAD"; + case WMI_SERVICE_NLO: + return 
"NLO"; + case WMI_SERVICE_GTK_OFFLOAD: + return "GTK_OFFLOAD"; + case WMI_SERVICE_SCAN_SCH: + return "SCAN_SCH"; + case WMI_SERVICE_CSA_OFFLOAD: + return "CSA_OFFLOAD"; + case WMI_SERVICE_CHATTER: + return "CHATTER"; + case WMI_SERVICE_COEX_FREQAVOID: + return "COEX_FREQAVOID"; + case WMI_SERVICE_PACKET_POWER_SAVE: + return "PACKET_POWER_SAVE"; + case WMI_SERVICE_FORCE_FW_HANG: + return "FORCE FW HANG"; + case WMI_SERVICE_GPIO: + return "GPIO"; + case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM: + return "MODULATED DTIM"; + case WMI_STA_UAPSD_BASIC_AUTO_TRIG: + return "BASIC UAPSD"; + case WMI_STA_UAPSD_VAR_AUTO_TRIG: + return "VAR UAPSD"; + case WMI_SERVICE_STA_KEEP_ALIVE: + return "STA KEEP ALIVE"; + case WMI_SERVICE_TX_ENCAP: + return "TX ENCAP"; + default: + return "UNKNOWN SERVICE\n"; + } +} + + +#define WMI_SERVICE_BM_SIZE \ + ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32)) + +/* 2 word representation of MAC addr */ +struct wmi_mac_addr { + union { + u8 addr[6]; + struct { + u32 word0; + u32 word1; + } __packed; + } __packed; +} __packed; + +/* macro to convert MAC address from WMI word format to char array */ +#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \ + (c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \ + (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \ + (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \ + (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \ + (c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \ + (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \ + } while (0) + +/* + * wmi command groups. + */ +enum wmi_cmd_group { + /* 0 to 2 are reserved */ + WMI_GRP_START = 0x3, + WMI_GRP_SCAN = WMI_GRP_START, + WMI_GRP_PDEV, + WMI_GRP_VDEV, + WMI_GRP_PEER, + WMI_GRP_MGMT, + WMI_GRP_BA_NEG, + WMI_GRP_STA_PS, + WMI_GRP_DFS, + WMI_GRP_ROAM, + WMI_GRP_OFL_SCAN, + WMI_GRP_P2P, + WMI_GRP_AP_PS, + WMI_GRP_RATE_CTRL, + WMI_GRP_PROFILE, + WMI_GRP_SUSPEND, + WMI_GRP_BCN_FILTER, + WMI_GRP_WOW, + WMI_GRP_RTT, + WMI_GRP_SPECTRAL, + WMI_GRP_STATS, + WMI_GRP_ARP_NS_OFL, + WMI_GRP_NLO_OFL, + WMI_GRP_GTK_OFL, + WMI_GRP_CSA_OFL, + WMI_GRP_CHATTER, + WMI_GRP_TID_ADDBA, + WMI_GRP_MISC, + WMI_GRP_GPIO, +}; + +#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1) +#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1) + +/* Command IDs and commande events. 
+/* Command IDs and command events. */
+enum wmi_cmd_id {
+ WMI_INIT_CMDID = 0x1,
+
+ /* Scan specific commands */
+ WMI_START_SCAN_CMDID = WMI_CMD_GRP(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+
+ /* PDEV (physical device) specific commands */
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_CMD_GRP(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+
+ /* VDEV (virtual device) specific commands */
+ WMI_VDEV_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_PEER_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+ WMI_BCN_TX_CMDID = WMI_CMD_GRP(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+
+ /* commands to control BA negotiation directly from the host */
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_CMD_GRP(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+
+ /* DFS-specific commands */
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+
+ /* Roaming specific commands */
+ WMI_ROAM_SCAN_MODE = WMI_CMD_GRP(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+
+ /* offload scan specific commands */
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_CMD_GRP(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_CMD_GRP(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+
+ /* AP power save specific config */
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_CMD_GRP(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_PEER_RATE_RETRY_SCHED_CMDID = WMI_CMD_GRP(WMI_GRP_RATE_CTRL),
+
+ /* WLAN profiling commands */
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_CMD_GRP(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+ /* Suspend/resume command IDs */
+ WMI_PDEV_SUSPEND_CMDID = WMI_CMD_GRP(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_ADD_BCN_FILTER_CMDID = WMI_CMD_GRP(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+
+ /* WOW specific WMI commands */
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_CMD_GRP(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related commands */
+ WMI_RTT_MEASREQ_CMDID = WMI_CMD_GRP(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+
+ /* spectral scan commands */
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_CMD_GRP(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+
+ /* F/W stats */
+ WMI_REQUEST_STATS_CMDID = WMI_CMD_GRP(WMI_GRP_STATS),
+
+ /* ARP offload request */
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_ARP_NS_OFL),
+
+ /* network list offload config */
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_NLO_OFL),
+
+ /* GTK offload specific WMI commands */
+ WMI_GTK_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_GTK_OFL),
+
+ /* CSA offload specific WMI commands */
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+
+ /* Chatter commands */
+ WMI_CHATTER_SET_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_CHATTER),
+
+ /* addba specific commands */
+ WMI_PEER_TID_ADDBA_CMDID = WMI_CMD_GRP(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+
+ /* set station mimo powersave method */
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ /* Configure the Station UAPSD AC Auto Trigger Parameters */
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+
+ /*
+ * STA keep alive parameter configuration,
+ * requires WMI_SERVICE_STA_KEEP_ALIVE
+ */
+ WMI_STA_KEEPALIVE_CMD,
+
+ /* misc command group */
+ WMI_ECHO_CMDID = WMI_CMD_GRP(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+
+ /* GPIO Configuration */
+ WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+};
+
+enum wmi_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+
+ /* Scan specific events */
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+
+ /* PDEV specific events */
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+
+ /* VDEV specific events */
+ WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+
+ /* peer specific events */
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER),
+
+ /* beacon/mgmt specific events */
+ WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+
+ /* ADDBA related WMI events */
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_ROAM_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+
+ /* WoW */
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_WOW),
+
+ /* RTT */
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+
+ /* GTK offload */
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+
+ /* CSA IE received event */
+ WMI_CSA_HANDLING_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CSA_OFL),
+
+ /* Misc events */
+ WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+
+ /* GPIO Event */
+ WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
+};
+
+enum wmi_phy_mode {
+ MODE_11A = 0, /* 11a Mode */
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4, /* 11a HT20 mode */
+ MODE_11NG_HT20 = 5, /* 11g HT20 mode */
+ MODE_11NA_HT40 = 6, /* 11a HT40 mode */
+ MODE_11NG_HT40 = 7, /* 11g HT40 mode */
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ /* MODE_11AC_VHT160 = 11, */
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_UNKNOWN = 14,
+ MODE_MAX = 14
+};
+
+#define WMI_CHAN_LIST_TAG 0x1
+#define WMI_SSID_LIST_TAG 0x2
+#define WMI_BSSID_LIST_TAG 0x3
+#define WMI_IE_TAG 0x4
+
+struct wmi_channel {
+ __le32 mhz;
+ __le32 band_center_freq1;
+ __le32 band_center_freq2; /* valid for 11ac, 80plus80 */
+ union {
+ __le32 flags; /* WMI_CHAN_FLAG_ */
+ struct {
+ u8 mode; /* only 6 LSBs */
+ } __packed;
+ } __packed;
+ union {
+ __le32 reginfo0;
+ struct {
+ u8 min_power;
+ u8 max_power;
+ u8 reg_power;
+ u8 reg_classid;
+ } __packed;
+ } __packed;
+ union {
+ __le32 reginfo1;
+ struct {
+ u8 antenna_max;
+ } __packed;
+ } __packed;
+} __packed;
+
+struct wmi_channel_arg {
+ u32 freq;
+ u32 band_center_freq1;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ /* note: power unit is 1/4th of dBm */
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ u32 max_antenna_gain;
+ u32 reg_class_id;
+ enum wmi_phy_mode mode;
+};
+
+enum wmi_channel_change_cause {
+ WMI_CHANNEL_CHANGE_CAUSE_NONE = 0,
+ WMI_CHANNEL_CHANGE_CAUSE_CSA,
+};
+
+#define WMI_CHAN_FLAG_HT40_PLUS (1 << 6)
+#define WMI_CHAN_FLAG_PASSIVE (1 << 7)
+#define WMI_CHAN_FLAG_ADHOC_ALLOWED (1 << 8)
+#define WMI_CHAN_FLAG_AP_DISABLED (1 << 9)
+#define WMI_CHAN_FLAG_DFS (1 << 10)
+#define WMI_CHAN_FLAG_ALLOW_HT (1 << 11)
+#define WMI_CHAN_FLAG_ALLOW_VHT (1 << 12)
+
+/* Indicate reason for channel switch */
+#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
+
+#define WMI_MAX_SPATIAL_STREAM 3
+
+/* HT Capabilities */
+#define WMI_HT_CAP_ENABLED 0x0001 /* HT enabled/disabled */
+#define WMI_HT_CAP_HT20_SGI 0x0002 /* Short Guard Interval with HT20 */
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004 /* Dynamic MIMO powersave */
+#define WMI_HT_CAP_TX_STBC 0x0008 /* B3 TX STBC */
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030 /* B4-B5 RX STBC */
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040 /* LDPC supported */
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080 /* L-SIG TXOP Protection */
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+/*
+ * The WMI_VHT_CAP_* flags map to the IEEE 802.11ac VHT capability
+ * information field. The fields not defined here are not supported, or
+ * reserved. Do not change these masks, and if you have to add a new
+ * one, follow the bitmask as specified by the 802.11ac draft.
+ */
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+/* The following also apply to the max HT A-MSDU */
+#define WMI_VHT_CAP_MAX_MPDU_LEN_3839 0x00000000
+#define WMI_VHT_CAP_MAX_MPDU_LEN_7935 0x00000001
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+/*
+ * Interested readers should refer to the Rx/Tx MCS Map definition in
+ * 802.11ac.
+ */
+#define WMI_VHT_MAX_MCS_4_SS_MASK(r, ss) ((3 & (r)) << (((ss) - 1) << 1))
+#define WMI_VHT_MAX_SUPP_RATE_MASK 0x1fff0000
+#define WMI_VHT_MAX_SUPP_RATE_MASK_SHIFT 16
+
+enum {
+ REGDMN_MODE_11A = 0x00001, /* 11a channels */
+ REGDMN_MODE_TURBO = 0x00002, /* 11a turbo-only channels */
+ REGDMN_MODE_11B = 0x00004, /* 11b channels */
+ REGDMN_MODE_PUREG = 0x00008, /* 11g channels (OFDM only) */
+ REGDMN_MODE_11G = 0x00008, /* XXX historical */
+ REGDMN_MODE_108G = 0x00020, /* 11a+Turbo channels */
+ REGDMN_MODE_108A = 0x00040, /* 11g+Turbo channels */
+ REGDMN_MODE_XR = 0x00100, /* XR channels */
+ REGDMN_MODE_11A_HALF_RATE = 0x00200, /* 11A half rate channels */
+ REGDMN_MODE_11A_QUARTER_RATE = 0x00400, /* 11A quarter rate channels */
+ REGDMN_MODE_11NG_HT20 = 0x00800, /* 11N-G HT20 channels */
+ REGDMN_MODE_11NA_HT20 = 0x01000, /* 11N-A HT20 channels */
+ REGDMN_MODE_11NG_HT40PLUS = 0x02000, /* 11N-G HT40 + channels */
+ REGDMN_MODE_11NG_HT40MINUS = 0x04000, /* 11N-G HT40 - channels */
+ REGDMN_MODE_11NA_HT40PLUS = 0x08000, /* 11N-A HT40 + channels */
+ REGDMN_MODE_11NA_HT40MINUS = 0x10000, /* 11N-A HT40 - channels */
+ REGDMN_MODE_11AC_VHT20 = 0x20000, /* 5 GHz, VHT20 */
+ REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5 GHz, VHT40 + channels */
+ REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5 GHz, VHT40 - channels */
+ REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5 GHz, VHT80 channels */
+ REGDMN_MODE_ALL = 0xffffffff
+};
+
+#define REGDMN_CAP1_CHAN_HALF_RATE 0x00000001
+#define REGDMN_CAP1_CHAN_QUARTER_RATE 0x00000002
+#define REGDMN_CAP1_CHAN_HAL49GHZ 0x00000004
+
+/* regulatory capabilities */
+#define REGDMN_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U2 0x0100
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
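+
+/*
+ * Editor's illustrative example (assumption, not in the original
+ * patch): building an 802.11ac MCS map for two spatial streams with
+ * WMI_VHT_MAX_MCS_4_SS_MASK() above. Rate code 2 selects MCS 0-9, so
+ * the two masks below evaluate to (2 << 0) | (2 << 2) == 0xa.
+ */
+static inline u32 wmi_vht_mcs_map_2ss(void)
+{
+ return WMI_VHT_MAX_MCS_4_SS_MASK(2, 1) |
+ WMI_VHT_MAX_MCS_4_SS_MASK(2, 2);
+}
+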
+
+struct hal_reg_capabilities {
+ /* regdomain value specified in EEPROM */
+ __le32 eeprom_rd;
+ /* regdomain */
+ __le32 eeprom_rd_ext;
+ /* CAP1 capabilities bit map */
+ __le32 regcap1;
+ /* REGDMN EEPROM CAP */
+ __le32 regcap2;
+ /* REGDMN MODE */
+ __le32 wireless_modes;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
+ __le32 low_5ghz_chan;
+ __le32 high_5ghz_chan;
+} __packed;
+
+enum wlan_mode_capability {
+ WHAL_WLAN_11A_CAPABILITY = 0x1,
+ WHAL_WLAN_11G_CAPABILITY = 0x2,
+ WHAL_WLAN_11AG_CAPABILITY = 0x3,
+};
+
+/* structure used by FW for requesting host memory */
+struct wlan_host_mem_req {
+ /* ID of the request */
+ __le32 req_id;
+ /* size of each unit */
+ __le32 unit_size;
+ /*
+ * flags to indicate that the number of units is dependent
+ * on the number of resources (num vdevs, num peers, etc.)
+ */
+ __le32 num_unit_info;
+ /*
+ * actual number of units to allocate. If the flags in num_unit_info
+ * indicate that the number of units is tied to the number of a
+ * particular resource to allocate, then the num_units field is set
+ * to 0 and the host will derive the number of units from the number
+ * of the resources it is requesting.
+ */
+ __le32 num_units;
+} __packed;
+
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
+ ((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+ (1 << ((svc_id)%(sizeof(u32))))) != 0)
+
+/*
+ * The following struct holds optional payload for
+ * wmi_service_ready_event, e.g., 802.11ac passes some of the
+ * device capabilities to the host.
+ */
+struct wmi_service_ready_event {
+ __le32 sw_version;
+ __le32 sw_version_1;
+ __le32 abi_version;
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+ __le32 num_rf_chains;
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ struct hal_reg_capabilities hal_reg_capabilities;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+ /*
+ * Max beacon and Probe Response IE offload size
+ * (includes optional P2P IEs)
+ */
+ __le32 max_bcn_ie_size;
+ /*
+ * request to host to allocate a chunk of memory and pass it down to
+ * FW via WMI_INIT. FW uses this as FW extension memory for saving
+ * its data structures. Only valid for low latency interfaces like
+ * PCIE where FW can access this memory directly (or) by DMA.
+ */
+ __le32 num_mem_reqs;
+ struct wlan_host_mem_req mem_reqs[1];
+} __packed;
+
+/*
+ * status consists of upper 16 bits of init status and lower 16 bits of
+ * module ID that returned the status
+ */
+#define WLAN_INIT_STATUS_SUCCESS 0x0
+#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
+#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ struct wmi_mac_addr mac_addr;
+ __le32 status;
+} __packed;
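+
+/*
+ * Editor's illustrative sketch (assumption, not in the original patch):
+ * testing one bit of the SERVICE_READY service bitmap with the
+ * WMI_SERVICE_IS_ENABLED() macro above; "bmap" is a hypothetical host
+ * copy of the bitmap words already converted to CPU byte order.
+ */
+static inline bool wmi_has_service(const u32 *bmap, int svc_id)
+{
+ return WMI_SERVICE_IS_ENABLED(bmap, svc_id);
+}
+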
+struct wmi_resource_config {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /*
+ * In offload mode target supports features like WOW, chatter and
+ * other protocol offloads. In order to support them some
+ * functionalities like reorder buffering, PN checking need to be
+ * done in target. This determines the maximum number of peers
+ * supported by the target in offload mode.
+ */
+ __le32 num_offload_peers;
+
+ /* For target-based RX reordering */
+ __le32 num_offload_reorder_bufs;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+ * This setting also determines the default TX behavior, however TX
+ * behavior can be modified on a per VAP basis during VAP init
+ */
+ __le32 rx_decap_mode;
+
+ /* maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum VDEV that could use offload roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum AP profiles that would push to offload roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size, e.g., for target PCI limit can be
+ * 0 (default) or 1 (256B)
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW:
+ * number of video nodes to be supported
+ * and max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* maximum VDEV that could use GTK offload */
+ __le32 gtk_offload_max_vdev;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
+
+/* structure describing a host memory chunk */
+struct host_memory_chunk {
+ /* id of the request that is passed up in service ready */
+ __le32 req_id;
+ /* the physical address of the memory chunk */
+ __le32 ptr;
+ /* size of the chunk */
+ __le32 size;
+} __packed;
+
+struct wmi_init_cmd {
+ struct wmi_resource_config resource_config;
+ __le32 num_host_mem_chunks;
+
+ /*
+ * variable number of host memory chunks.
+ * This should be the last element in the structure
+ */
+ struct host_memory_chunk host_mem_chunks[1];
+} __packed;
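+
+/*
+ * Editor's illustrative sketch (assumption, not in the original patch):
+ * since host_mem_chunks[] is a variable-length tail with one element
+ * already counted in the struct itself, a host would size the WMI_INIT
+ * buffer for n chunks roughly as follows.
+ */
+static inline size_t wmi_init_cmd_len(u32 n)
+{
+ return sizeof(struct wmi_init_cmd) +
+ (n ? n - 1 : 0) * sizeof(struct host_memory_chunk);
+}
+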
+/* TLV for channel list */
+struct wmi_chan_list {
+ __le32 tag; /* WMI_CHAN_LIST_TAG */
+ __le32 num_chan;
+ __le32 channel_list[0];
+} __packed;
+
+struct wmi_bssid_list {
+ __le32 tag; /* WMI_BSSID_LIST_TAG */
+ __le32 num_bssid;
+ struct wmi_mac_addr bssid_list[0];
+} __packed;
+
+struct wmi_ie_data {
+ __le32 tag; /* WMI_IE_TAG */
+ __le32 ie_len;
+ u8 ie_data[0];
+} __packed;
+
+struct wmi_ssid {
+ __le32 ssid_len;
+ u8 ssid[32];
+} __packed;
+
+struct wmi_ssid_list {
+ __le32 tag; /* WMI_SSID_LIST_TAG */
+ __le32 num_ssids;
+ struct wmi_ssid ssids[0];
+} __packed;
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+struct wmi_start_scan_cmd {
+ /* Scan ID */
+ __le32 scan_id;
+ /* Scan requestor ID */
+ __le32 scan_req_id;
+ /* VDEV id (interface) that is requesting scan */
+ __le32 vdev_id;
+ /* Scan Priority, input to scan scheduler */
+ __le32 scan_priority;
+ /* Scan events subscription */
+ __le32 notify_scan_events;
+ /* dwell time in msec on active channels */
+ __le32 dwell_time_active;
+ /* dwell time in msec on passive channels */
+ __le32 dwell_time_passive;
+ /*
+ * min time in msec on the BSS channel, only valid if at least one
+ * VDEV is active
+ */
+ __le32 min_rest_time;
+ /*
+ * max rest time in msec on the BSS channel, only valid if at least
+ * one VDEV is active
+ */
+ /*
+ * The scanner will rest on the bss channel at least min_rest_time.
+ * After min_rest_time the scanner will start checking for tx/rx
+ * activity on all VDEVs. If there is no activity the scanner will
+ * switch to off channel. If there is activity the scanner will let
+ * the radio on the bss channel until max_rest_time expires. At
+ * max_rest_time the scanner will switch to off channel irrespective
+ * of activity. Activity is determined by the idle_time parameter.
+ */
+ __le32 max_rest_time;
+ /*
+ * time before sending next set of probe requests.
+ * The scanner keeps repeating probe requests transmission with
+ * period specified by repeat_probe_time.
+ * The number of probe requests specified depends on the ssid_list
+ * and bssid_list
+ */
+ __le32 repeat_probe_time;
+ /* time in msec between two consecutive probe requests within a set */
+ __le32 probe_spacing_time;
+ /*
+ * data inactivity time in msec on bss channel that will be used by
+ * scanner for measuring the inactivity.
+ */
+ __le32 idle_time;
+ /* maximum time in msec allowed for scan */
+ __le32 max_scan_time;
+ /*
+ * delay in msec before sending first probe request after switching
+ * to a channel
+ */
+ __le32 probe_delay;
+ /* Scan control flags */
+ __le32 scan_ctrl_flags;
+
+ /* Burst duration time in msecs */
+ __le32 burst_duration;
+ /*
+ * TLV (tag length value) parameters follow the scan_cmd structure.
+ * TLVs can contain a channel list, bssid list, ssid list and
+ * IEs. The TLV tags are defined above.
+ */
+} __packed;
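+
+/*
+ * Editor's illustrative sketch (assumption, not in the original patch):
+ * on-wire size of one optional channel-list TLV appended after
+ * wmi_start_scan_cmd - the wmi_chan_list header above plus num_chan
+ * __le32 entries.
+ */
+static inline size_t wmi_chan_list_tlv_len(u32 num_chan)
+{
+ return sizeof(struct wmi_chan_list) + num_chan * sizeof(__le32);
+}
+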
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u32 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+/* scan control flags */
+
+/* passively scan all channels including active channels */
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+/* add wild card ssid probe request even though ssid_list is specified */
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+/* add cck rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+/* add ofdm rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+/* To enable indication of Chan load and Noise floor to host */
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+/* Filter Probe request frames */
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+/* When set, DFS channels will not be scanned */
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+/* Different FW scan engines may choose to bail out on errors.
+ * Allow the driver to have influence over that. */
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+
+/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
+#define WMI_SCAN_CLASS_MASK 0xFF000000
+
+
+enum wmi_stop_scan_type {
+ WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */
+ WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */
+ WMI_SCAN_STOP_ALL = 0x04000000, /* stop all scans */
+};
+
+struct wmi_stop_scan_cmd {
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 req_type;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_stop_scan_arg {
+ u32 req_id;
+ enum wmi_stop_scan_type req_type;
+ union {
+ u32 scan_id;
+ u32 vdev_id;
+ } u;
+};
+
+struct wmi_scan_chan_list_cmd {
+ __le32 num_scan_chans;
+ struct wmi_channel chan_info[0];
+} __packed;
+
+struct wmi_scan_chan_list_arg {
+ u32 n_channels;
+ struct wmi_channel_arg *channels;
+};
+
+enum wmi_bss_filter {
+ WMI_BSS_FILTER_NONE = 0, /* no beacons forwarded */
+ WMI_BSS_FILTER_ALL, /* all beacons forwarded */
+ WMI_BSS_FILTER_PROFILE, /* only beacons matching profile */
+ WMI_BSS_FILTER_ALL_BUT_PROFILE, /* all but beacons matching profile */
+ WMI_BSS_FILTER_CURRENT_BSS, /* only beacons matching current BSS */
+ WMI_BSS_FILTER_ALL_BUT_BSS, /* all but beacons matching BSS */
+ WMI_BSS_FILTER_PROBED_SSID, /* beacons matching probed ssid */
+ WMI_BSS_FILTER_LAST_BSS, /* marker only */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = 0x1,
+ WMI_SCAN_EVENT_COMPLETED = 0x2,
+ WMI_SCAN_EVENT_BSS_CHANNEL = 0x4,
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
+ WMI_SCAN_EVENT_DEQUEUED = 0x10,
+ WMI_SCAN_EVENT_PREEMPTED = 0x20, /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_START_FAILED = 0x40,
+ WMI_SCAN_EVENT_RESTARTED = 0x80,
+ WMI_SCAN_EVENT_MAX = 0x8000
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_scan_event {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * This defines how much headroom is kept in the
+ * receive frame between the descriptor and the
+ * payload, in order for the WMI PHY error and
+ * management handler to insert header contents.
+ *
+ * This is in bytes.
+ */
+#define WMI_MGMT_RX_HDR_HEADROOM 52
+
+/*
+ * This event will be used for sending scan results
+ * as well as rx mgmt frames to the host. The rx buffer
+ * will be sent as part of this WMI event. It would be a
+ * good idea to pass all the fields in the RX status
+ * descriptor up to the host.
+ */
+struct wmi_mgmt_rx_hdr {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status; /* %WMI_RX_STATUS_ */
+} __packed;
+
+struct wmi_mgmt_rx_event {
+ struct wmi_mgmt_rx_hdr hdr;
+ u8 buf[0];
+} __packed;
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+
+struct wmi_single_phyerr_rx_hdr {
+ /* TSF timestamp */
+ __le32 tsf_timestamp;
+
+ /*
+ * Current freq1, freq2
+ *
+ * [7:0]: freq1[lo]
+ * [15:8] : freq1[hi]
+ * [23:16]: freq2[lo]
+ * [31:24]: freq2[hi]
+ */
+ __le16 freq1;
+ __le16 freq2;
+
+ /*
+ * Combined RSSI over all chains and channel width for this PHY error
+ *
+ * [7:0]: RSSI combined
+ * [15:8]: Channel width (MHz)
+ * [23:16]: PHY error code
+ * [31:24]: reserved (future use)
+ */
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u8 rsvd0;
+
+ /*
+ * RSSI on chain 0 through 3
+ *
+ * This is formatted the same as the PPDU_START RX descriptor
+ * field:
+ *
+ * [7:0]: pri20
+ * [15:8]: sec20
+ * [23:16]: sec40
+ * [31:24]: sec80
+ */
+
+ __le32 rssi_chain0;
+ __le32 rssi_chain1;
+ __le32 rssi_chain2;
+ __le32 rssi_chain3;
+
+ /*
+ * Last calibrated NF value for chain 0 through 3
+ *
+ * nf_list_1:
+ *
+ * + [15:0] - chain 0
+ * + [31:16] - chain 1
+ *
+ * nf_list_2:
+ *
+ * + [15:0] - chain 2
+ * + [31:16] - chain 3
+ */
+ __le32 nf_list_1;
+ __le32 nf_list_2;
+
+
+ /* Length of the frame */
+ __le32 buf_len;
+} __packed;
+
+struct wmi_single_phyerr_rx_event {
+ /* Phy error event header */
+ struct wmi_single_phyerr_rx_hdr hdr;
+ /* frame buffer */
+ u8 bufp[0];
+} __packed;
+
+struct wmi_comb_phyerr_rx_hdr {
+ /* PHY error count */
+ __le32 num_phyerr_events;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+} __packed;
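+
+/*
+ * Editor's illustrative sketch (assumption, not in the original patch):
+ * stepping from one sub-event to the next inside the combined PHY
+ * error payload described below; each record is a
+ * wmi_single_phyerr_rx_hdr followed by buf_len bytes of frame data.
+ */
+static inline const u8 *wmi_next_phyerr(const u8 *p)
+{
+ const struct wmi_single_phyerr_rx_hdr *h = (const void *)p;
+
+ return p + sizeof(*h) + __le32_to_cpu(h->buf_len);
+}
+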
+struct wmi_comb_phyerr_rx_event {
+ /* PHY error event header */
+ struct wmi_comb_phyerr_rx_hdr hdr;
+ /*
+ * frame buffer - contains multiple payloads in the order:
+ * header - payload, header - payload...
+ * (The header is of type: wmi_single_phyerr_rx_hdr)
+ */
+ u8 bufp[0];
+} __packed;
+
+struct wmi_mgmt_tx_hdr {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 tx_rate;
+ __le32 tx_power;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_mgmt_tx_cmd {
+ struct wmi_mgmt_tx_hdr hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_echo_event {
+ __le32 value;
+} __packed;
+
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
+
+struct wmi_pdev_set_regdomain_cmd {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+} __packed;
+
+/* Command to set/unset chip in quiet mode */
+struct wmi_pdev_set_quiet_cmd {
+ /* period in TUs */
+ __le32 period;
+
+ /* duration in TUs */
+ __le32 duration;
+
+ /* offset in TUs */
+ __le32 next_start;
+
+ /* enable/disable */
+ __le32 enabled;
+} __packed;
+
+
+/*
+ * 802.11g protection mode.
+ */
+enum ath10k_protmode {
+ ATH10K_PROT_NONE = 0, /* no protection */
+ ATH10K_PROT_CTSONLY = 1, /* CTS to self */
+ ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
+};
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_csa_event_ies_present_flag {
+ WMI_CSA_IE_PRESENT = 0x00000001,
+ WMI_XCSA_IE_PRESENT = 0x00000002,
+ WMI_WBW_IE_PRESENT = 0x00000004,
+ WMI_CSWARP_IE_PRESENT = 0x00000008,
+};
+
+/* wmi CSA receive event from beacon frame */
+struct wmi_csa_event {
+ __le32 i_fc_dur;
+ /* Bit 0-15: FC */
+ /* Bit 16-31: DUR */
+ struct wmi_mac_addr i_addr1;
+ struct wmi_mac_addr i_addr2;
+ __le32 csa_ie[2];
+ __le32 xcsa_ie[2];
+ __le32 wb_ie[2];
+ __le32 cswarp_ie;
+ __le32 ies_present_flag; /* wmi_csa_event_ies_present_flag */
+} __packed;
+
+/* the definition of different PDEV parameters */
+#define PDEV_DEFAULT_STATS_UPDATE_PERIOD 500
+#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
+#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
+
+enum wmi_pdev_param {
+ /* TX chain mask */
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon transmission mode. 0: staggered, 1: bursted */
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off-channel mode.
+ * 0: turn off off-channel mode. 1: turn on off-channel mode
+ */
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection, 1: use CTS-to-self, 2: use RTS/CTS
+ */
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ /* Dynamic bandwidth. 0: disable, 1: enable */
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0: disable */
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* aggregate sw retry threshold. 0: disable */
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0: disable */
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ /* RX buffering flush enable */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ /* RX buffering watermark */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ /* RX buffering timeout enable */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ /* RX buffering timeout value */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ /* pdev level stats update period in ms */
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP frames are sent */
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable proxy STA */
+ WMI_PDEV_PARAM_PROXY_STA,
+ /* Enable/Disable low power state when all VDEVs are inactive/idle */
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ /* Enable/Disable power gating sleep */
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+};
+
+struct wmi_pdev_set_param_cmd {
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_pdev_get_tpc_config_cmd {
+ /* parameter */
+ __le32 param;
+} __packed;
+
+#define WMI_TPC_RATE_MAX 160
+#define WMI_TPC_TX_N_CHAIN 4
+
+enum wmi_tpc_config_event_flag {
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1,
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC = 0x2,
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF = 0x4,
+};
+
+struct wmi_pdev_tpc_config_event {
+ __le32 reg_domain;
+ __le32 chan_freq;
+ __le32 phy_mode;
+ __le32 twice_antenna_reduction;
+ __le32 twice_max_rd_power;
+ s32 twice_antenna_gain;
+ __le32 power_limit;
+ __le32 rate_max;
+ __le32 num_tx_chain;
+ __le32 ctl;
+ __le32 flags;
+ s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ u8 rates_array[WMI_TPC_RATE_MAX];
+} __packed;
+
+/* Transmit power scale factor. */
+enum wmi_tp_scale {
+ WMI_TP_SCALE_MAX = 0, /* no scaling (default) */
+ WMI_TP_SCALE_50 = 1, /* 50% of max (-3 dBm) */
+ WMI_TP_SCALE_25 = 2, /* 25% of max (-6 dBm) */
+ WMI_TP_SCALE_12 = 3, /* 12% of max (-9 dBm) */
+ WMI_TP_SCALE_MIN = 4, /* min, but still on */
+ WMI_TP_SCALE_SIZE = 5, /* max num of enum */
+};
+
+struct wmi_set_channel_cmd {
+ /* channel (only frequency and mode info are used) */
+ struct wmi_channel chan;
+} __packed;
+
+struct wmi_pdev_chanlist_update_event {
+ /* number of channels */
+ __le32 num_chan;
+ /* array of channels */
+ struct wmi_channel channel_list[1];
+} __packed;
+
+#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
+
+struct wmi_debug_mesg_event {
+ /* message buffer, NULL terminated */
+ char bufp[WMI_MAX_DEBUG_MESG];
+} __packed;
+
+enum {
+ /* P2P device */
+ VDEV_SUBTYPE_P2PDEV = 0,
+ /* P2P client */
+ VDEV_SUBTYPE_P2PCLI,
+ /* P2P GO */
+ VDEV_SUBTYPE_P2PGO,
+ /* BT3.0 HS */
+ VDEV_SUBTYPE_BT,
+};
+
+struct wmi_pdev_set_channel_cmd {
+ /* ignore power; only use flags, mode and freq */
+ struct wmi_channel chan;
+} __packed;
+
+/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
+#define WMI_DSCP_MAP_MAX (64)
+struct wmi_pdev_set_dscp_tid_map_cmd {
+ /* map indicating DSCP to TID conversion */
+ __le32 dscp_to_tid_map[WMI_DSCP_MAP_MAX];
+} __packed;
+
+enum mcast_bcast_rate_id {
+ WMI_SET_MCAST_RATE,
+ WMI_SET_BCAST_RATE
+};
+
+struct mcast_bcast_rate {
+ enum mcast_bcast_rate_id rate_id;
+ __le32 rate;
+} __packed;
+
+struct wmi_wmm_params {
+ __le32 cwmin;
+ __le32 cwmax;
+ __le32 aifs;
+ __le32 txop;
+ __le32 acm;
+ __le32 no_ack;
+} __packed;
+
+struct wmi_pdev_set_wmm_params {
+ struct wmi_wmm_params ac_be;
+ struct wmi_wmm_params ac_bk;
+ struct wmi_wmm_params ac_vi;
+ struct wmi_wmm_params ac_vo;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u32 cwmin;
+ u32 cwmax;
+ u32 aifs;
+ u32 txop;
+ u32 acm;
+ u32 no_ack;
+};
+
+struct wmi_pdev_set_wmm_params_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
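+
+/*
+ * Editor's illustrative sketch (assumption, not in the original patch):
+ * filling wmi_pdev_set_param_cmd above for WMI_PDEV_SET_PARAM_CMDID,
+ * using the usual kernel byte-order helper.
+ */
+static inline void wmi_fill_pdev_param(struct wmi_pdev_set_param_cmd *cmd,
+ enum wmi_pdev_param param, u32 value)
+{
+ cmd->param_id = __cpu_to_le32(param);
+ cmd->param_value = __cpu_to_le32(value);
+}
+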
+struct wal_dbg_tx_stats {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDUs queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDUs queued to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requed;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+ /* wal pdev tx timeout */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+} __packed;
+
+struct wal_dbg_rx_stats {
+ /* Counts any change in ring routing mid-ppdu */
+ __le32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ __le32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ __le32 r0_frags;
+ __le32 r1_frags;
+ __le32 r2_frags;
+ __le32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ __le32 htt_msdus;
+ __le32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ __le32 loc_msdus;
+ __le32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ __le32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ __le32 phy_errs;
+
+ /* Number of PHY error drops */
+ __le32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ __le32 mpdu_errs;
+} __packed;
+
+struct wal_dbg_peer_stats {
+ /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+ __le32 dummy;
+} __packed;
+
+struct wal_dbg_stats {
+ struct wal_dbg_tx_stats tx;
+ struct wal_dbg_rx_stats rx;
+ struct wal_dbg_peer_stats peer;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_REQUEST_PEER_STAT = 0x01,
+ WMI_REQUEST_AP_STAT = 0x02
+};
+
+struct wmi_request_stats_cmd {
+ __le32 stats_id;
+
+ /*
+ * Space to add parameters like
+ * peer mac addr
+ */
+} __packed;
+
+/* Suspend option */
+enum {
+ /* suspend */
+ WMI_PDEV_SUSPEND,
+
+ /* suspend and disable all interrupts */
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct wmi_pdev_suspend_cmd {
+ /* suspend option sent to target */
+ __le32 suspend_opt;
+} __packed;
+
+struct wmi_stats_event {
+ __le32 stats_id; /* %WMI_REQUEST_ */
+ /*
+ * number of pdev stats event structures
+ * (wmi_pdev_stats) 0 or 1
+ */
+ __le32 num_pdev_stats;
+ /*
+ * number of vdev stats event structures
+ * (wmi_vdev_stats) 0 or max vdevs
+ */
+ __le32 num_vdev_stats;
+ /*
+ * number of peer stats event structures
+ * (wmi_peer_stats) 0 or max peers
+ */
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ /*
+ * followed by
+ * num_pdev_stats * size of(struct wmi_pdev_stats)
+ * num_vdev_stats * size of(struct wmi_vdev_stats)
+ * num_peer_stats * size of(struct wmi_peer_stats)
+ *
+ * By having a zero sized array, the pointer to data area
+ * becomes available without increasing the struct size
+ */
+ u8 data[0];
+} __packed;
+
+/*
+ * PDEV statistics
+ * TODO: add all PDEV stats here
+ */
+struct wmi_pdev_stats {
+ __le32 chan_nf; /* Channel noise floor */
+ __le32 tx_frame_count; /* TX frame count */
+ __le32 rx_frame_count; /* RX frame count */
+ __le32 rx_clear_count; /* rx clear count */
+ __le32 cycle_count; /* cycle count */
+ __le32 phy_err_count; /* Phy error count */
+ __le32 chan_tx_pwr; /* channel tx power */
+ struct wal_dbg_stats wal; /* WAL dbg stats */
+} __packed;
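+
+/*
+ * Editor's illustrative sketch (assumption, not in the original patch):
+ * locating the vdev stats inside the variable data[] payload of
+ * wmi_stats_event, following the layout documented in the comment
+ * above; assumes the records are tightly packed in that order.
+ */
+static inline const u8 *wmi_stats_vdev_data(const struct wmi_stats_event *ev)
+{
+ return ev->data + __le32_to_cpu(ev->num_pdev_stats) *
+ sizeof(struct wmi_pdev_stats);
+}
+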
+/*
+ * VDEV statistics
+ * TODO: add all VDEV stats here
+ */
+struct wmi_vdev_stats {
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * peer statistics.
+ * TODO: add more stats
+ */
+struct wmi_peer_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_vdev_create_cmd {
+ __le32 vdev_id;
+ __le32 vdev_type;
+ __le32 vdev_subtype;
+ struct wmi_mac_addr vdev_macaddr;
+} __packed;
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE = 0,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE = 1,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT = 2,
+ WMI_VDEV_SUBTYPE_P2P_GO = 3,
+};
+
+/* values for vdev_subtype */
+
+/* values for vdev_start_request flags */
+/*
+ * Indicates that the AP VDEV uses a hidden SSID. Only valid for
+ * AP/GO.
+ */
+#define WMI_VDEV_START_HIDDEN_SSID (1<<0)
+/*
+ * Indicates if robust management frame/management frame
+ * protection is enabled. For GO/AP vdevs, it indicates that
+ * it may support station/client associations with RMF enabled.
+ * For STA/client vdevs, it indicates that the sta will
+ * associate with an AP with RMF enabled.
+ */
+#define WMI_VDEV_START_PMF_ENABLED (1<<1)
+
+struct wmi_p2p_noa_descriptor {
+ __le32 type_count; /* 255: continuous schedule, 0: reserved */
+ __le32 duration; /* Absent period duration in micro seconds */
+ __le32 interval; /* Absent period interval in micro seconds */
+ __le32 start_time; /* 32 bit tsf time when it starts */
+} __packed;
+
+struct wmi_vdev_start_request_cmd {
+ /* WMI channel */
+ struct wmi_channel chan;
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* requestor id identifying the caller module */
+ __le32 requestor_id;
+ /* beacon interval from received beacon */
+ __le32 beacon_interval;
+ /* DTIM Period from the received beacon */
+ __le32 dtim_period;
+ /* Flags */
+ __le32 flags;
+ /* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
+ struct wmi_ssid ssid;
+ /* beacon/probe response xmit rate. Applicable for SoftAP. */
+ __le32 bcn_tx_rate;
+ /* beacon/probe response xmit power. Applicable for SoftAP. */
+ __le32 bcn_tx_power;
+ /* number of p2p NOA descriptor(s) from scan entry */
+ __le32 num_noa_descriptors;
+ /*
+ * Disable H/W ack. This is used by WMI_VDEV_RESTART_REQUEST_CMDID.
+ * During CAC, our HW shouldn't ack detected frames.
+ */
+ __le32 disable_hw_ack;
+ /* actual p2p NOA descriptor from scan entry */
+ struct wmi_p2p_noa_descriptor noa_descriptors[2];
+} __packed;
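+
+/*
+ * Editor's illustrative sketch (assumption, not in the original patch):
+ * composing the wmi_vdev_start_request_cmd flags word from the boolean
+ * knobs carried in wmi_vdev_start_request_arg below.
+ */
+static inline u32 wmi_vdev_start_flags(bool hidden_ssid, bool pmf_enabled)
+{
+ u32 flags = 0;
+
+ if (hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+ return flags;
+}
+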
+struct wmi_vdev_restart_request_cmd {
+ struct wmi_vdev_start_request_cmd vdev_start_request_cmd;
+} __packed;
+
+struct wmi_vdev_start_request_arg {
+ u32 vdev_id;
+ struct wmi_channel_arg channel;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+};
+
+struct wmi_vdev_delete_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ __le32 vdev_id;
+ __le32 vdev_assoc_id;
+ struct wmi_mac_addr vdev_bssid;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_standby_response_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_response_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_set_param_cmd {
+ __le32 vdev_id;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP 0x01
+#define WMI_KEY_TX_USAGE 0x02 /* default tx key - static wep */
+
+struct wmi_key_seq_counter {
+ __le32 key_seq_counter_l;
+ __le32 key_seq_counter_h;
+} __packed;
+
+#define WMI_CIPHER_NONE 0x0 /* clear key */
+#define WMI_CIPHER_WEP 0x1
+#define WMI_CIPHER_TKIP 0x2
+#define WMI_CIPHER_AES_OCB 0x3
+#define WMI_CIPHER_AES_CCM 0x4
+#define WMI_CIPHER_WAPI 0x5
+#define WMI_CIPHER_CKIP 0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+
+struct wmi_vdev_install_key_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 key_idx;
+ __le32 key_flags;
+ __le32 key_cipher; /* %WMI_CIPHER_ */
+ struct wmi_key_seq_counter key_rsc_counter;
+ struct wmi_key_seq_counter key_global_rsc_counter;
+ struct wmi_key_seq_counter key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ __le32 key_len;
+ __le32 key_txmic_len;
+ __le32 key_rxmic_len;
+
+ /* contains key followed by tx mic followed by rx mic */
+ u8 key_data[0];
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ const void *key_data;
+};
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+};
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xff)
+
+/* the definition of different VDEV parameters */
+enum wmi_vdev_param {
+ /* RTS Threshold */
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. Used by off chan
+ * scheduler.
+ */
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* BMISS first time */
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ /* BMISS final time */
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ /* WMM enable/disable */
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum amount of time AP begins to consider STA inactive */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does not ACK the data-null, or if the
+ * data-null is buffered and the STA does not retrieve it, the STA
+ * will be considered unresponsive
+ * (see WMI_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_STA_KICKOUT event to the host so the STA can be deleted.
+ */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS: MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ /* Enable/Disable RTS-CTS */
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ /* Enable TX beamformee/beamformer */
+ WMI_VDEV_PARAM_TXBF,
+
+ /* Set packet power save */
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+
+ /*
+ * Drops unencrypted packets if received in an encrypted connection;
+ * otherwise forwards to host.
+ */
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+
+ /*
+ * Set the encapsulation type for frames.
+ */
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+};
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG 0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT 0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG 0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT 0x2
+
+enum wmi_start_event_param {
+ WMI_VDEV_RESP_START_EVENT = 0,
+ WMI_VDEV_RESP_RESTART_EVENT,
+};
+
+struct wmi_vdev_start_response_event {
+ __le32 vdev_id;
+ __le32 req_id;
+ __le32 resp_type; /* %WMI_VDEV_RESP_ */
+ __le32 status;
+} __packed;
+
+struct wmi_vdev_standby_req_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_req_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * common structure used for simple events
+ * (stopped, resume_req, standby response)
+ */
+struct wmi_vdev_simple_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+/* VDEV start response status codes */
+/* VDEV successfully started */
+#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
+
+/* requested VDEV not found */
+#define WMI_INIFIED_VDEV_START_RESPONSE_INVALID_VDEVID 0x1
+
+/* unsupported VDEV combination */
+#define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2
+
+/* Beacon processing related command and event structures */
+struct wmi_bcn_tx_hdr {
+ __le32 vdev_id;
+ __le32 tx_rate;
+ __le32 tx_power;
+ __le32 bcn_len;
+} __packed;
+
+struct wmi_bcn_tx_cmd {
+ struct wmi_bcn_tx_hdr hdr;
+ u8 *bcn[0];
+} __packed;
+
+struct wmi_bcn_tx_arg {
+ u32 vdev_id;
+ u32 tx_rate;
+ u32 tx_power;
+ u32 bcn_len;
+ const void *bcn;
+};
+
+/* Beacon filter */
+#define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */
+#define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */
+#define WMI_BCN_FILTER_RSSI 2 /* Pass Beacons RSSI >= RSSI threshold */
+#define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */
+#define WMI_BCN_FILTER_SSID 4 /* Pass Beacons with matching SSID */
+
+struct wmi_bcn_filter_rx_cmd {
+ /* Filter ID */
+ __le32 bcn_filter_id;
+ /* Filter type - wmi_bcn_filter */
+ __le32 bcn_filter;
+ /* Buffer len */
+ __le32 bcn_filter_len;
+ /* Filter info (threshold, BSSID, RSSI) */
+ u8 *bcn_filter_buf;
+} __packed;
+
+/* Capabilities and IEs to be passed to firmware */
+struct wmi_bcn_prb_info {
+ /* Capabilities */
+ __le32 caps;
+ /* ERP info */
+ __le32 erp;
+ /* Advanced capabilities */
+ /* HT capabilities */
+ /* HT Info */
+ /* ibss_dfs */
+ /* wpa Info */
+ /* rsn Info */
+ /* rrm info */
+ /* ath_ext */
+ /* app IE */
+} __packed;
+
+struct wmi_bcn_tmpl_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* TIM IE offset from the beginning of the template */
+/* Beacon filter */
+#define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */
+#define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */
+#define WMI_BCN_FILTER_RSSI 2 /* Pass Beacons RSSI >= RSSI threshold */
+#define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */
+#define WMI_BCN_FILTER_SSID 4 /* Pass Beacons with matching SSID */
+
+struct wmi_bcn_filter_rx_cmd {
+ /* Filter ID */
+ __le32 bcn_filter_id;
+ /* Filter type - wmi_bcn_filter */
+ __le32 bcn_filter;
+ /* Buffer len */
+ __le32 bcn_filter_len;
+ /* Filter info (threshold, BSSID, RSSI) */
+ u8 *bcn_filter_buf;
+} __packed;
+
+/* Capabilities and IEs to be passed to firmware */
+struct wmi_bcn_prb_info {
+ /* Capabilities */
+ __le32 caps;
+ /* ERP info */
+ __le32 erp;
+ /* Advanced capabilities */
+ /* HT capabilities */
+ /* HT Info */
+ /* ibss_dfs */
+ /* wpa Info */
+ /* rsn Info */
+ /* rrm info */
+ /* ath_ext */
+ /* app IE */
+} __packed;
+
+struct wmi_bcn_tmpl_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* TIM IE offset from the beginning of the template. */
+ __le32 tim_ie_offset;
+ /* beacon probe capabilities and IEs */
+ struct wmi_bcn_prb_info bcn_prb_info;
+ /* beacon buffer length */
+ __le32 buf_len;
+ /* variable length data */
+ u8 data[1];
+} __packed;
+
+struct wmi_prb_tmpl_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* beacon probe capabilities and IEs */
+ struct wmi_bcn_prb_info bcn_prb_info;
+ /* beacon buffer length */
+ __le32 buf_len;
+ /* Variable length data */
+ u8 data[1];
+} __packed;
+
+enum wmi_sta_ps_mode {
+ /* disable power save for the given STA VDEV */
+ WMI_STA_PS_MODE_DISABLED = 0,
+ /* enable power save for the given STA VDEV */
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+struct wmi_sta_powersave_mode_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+
+ /*
+ * Power save mode
+ * (see enum wmi_sta_ps_mode)
+ */
+ __le32 sta_ps_mode;
+} __packed;
+
+enum wmi_csa_offload_en {
+ WMI_CSA_OFFLOAD_DISABLE = 0,
+ WMI_CSA_OFFLOAD_ENABLE = 1,
+};
+
+struct wmi_csa_offload_enable_cmd {
+ __le32 vdev_id;
+ __le32 csa_offload_enable;
+} __packed;
+
+struct wmi_csa_offload_chanswitch_cmd {
+ __le32 vdev_id;
+ struct wmi_channel chan;
+} __packed;
+
+/*
+ * This parameter controls the policy for retrieving frames from the AP
+ * while the STA is in sleep state.
+ *
+ * Only takes effect if sta_ps_mode is enabled.
+ */
+enum wmi_sta_ps_param_rx_wake_policy {
+ /*
+ * Wake up whenever there is RX activity on the VDEV. In this mode
+ * the power save SM (state machine) will come out of sleep by either
+ * sending a null frame (or) a data frame (with PS==0) in response to
+ * the TIM bit set in a beacon frame received from the AP.
+ */
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+
+ /*
+ * Here the power save state machine will not wake up in response to
+ * the TIM bit; instead it will send a PS-Poll (or) a UAPSD trigger
+ * based on the UAPSD configuration set up by the WMISET_PS_SET_UAPSD
+ * WMI command. When all access categories are delivery-enabled, the
+ * station will send a UAPSD trigger frame, otherwise it will send a
+ * PS-Poll.
+ */
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/*
+ * Number of tx frames/beacon that cause the power save SM to wake up.
+ *
+ * Value 1 causes the SM to wake up for every TX. Value 0 has a special
+ * meaning: it causes the SM to never wake up, which is useful if you want
+ * to keep the system asleep all the time for some kind of test mode. The
+ * host can change this parameter at any time; it takes effect from the
+ * next tx frame.
+ */
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /*
+ * Values greater than one indicate that many TX attempts per beacon
+ * interval are allowed before the STA will wake up.
+ */
+};
+
+/*
+ * The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in the TIM before waking up (by sending a null frame
+ * with PS = 0). Value 0 has a special meaning: there is no maximum count
+ * and the FW will send as many PS-Polls as are necessary to retrieve the
+ * buffered BUs. This parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /*
+ * Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+};
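These STA power-save knobs are set per VDEV with ath10k_wmi_set_psmode() and ath10k_wmi_set_sta_ps_param(), both declared near the end of this header, using the wmi_sta_powersave_param ids defined just below. A minimal sketch with a hypothetical vdev 0 and example policy choices:

/* Illustrative sketch only: enable STA power save on a hypothetical vdev 0
 * and select the PS-Poll/UAPSD wake policy with an unlimited PS-Poll count. */
static int example_sta_ps_setup(struct ath10k *ar)
{
	int ret;

	ret = ath10k_wmi_set_psmode(ar, 0, WMI_STA_PS_MODE_ENABLED);
	if (ret)
		return ret;

	ret = ath10k_wmi_set_sta_ps_param(ar, 0,
					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,
					  WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD);
	if (ret)
		return ret;

	return ath10k_wmi_set_sta_ps_param(ar, 0,
					   WMI_STA_PS_PARAM_PSPOLL_COUNT,
					   WMI_STA_PS_PSPOLL_COUNT_NO_MAX);
}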
+/*
+ * This will include the delivery and trigger enabled state for every AC.
+ * This is the negotiated state with the AP. The host MLME needs to set
+ * this based on the AP capability and the state set in the association
+ * request by the station MLME. The lower 8 bits of the value specify the
+ * UAPSD configuration.
+ */
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+ ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+enum wmi_sta_powersave_param {
+ /*
+ * Controls how frames are retrieved from the AP while the STA is
+ * sleeping.
+ *
+ * (see enum wmi_sta_ps_param_rx_wake_policy)
+ */
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+
+ /*
+ * The STA will go active after this many TX
+ *
+ * (see enum wmi_sta_ps_param_tx_wake_threshold)
+ */
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+
+ /*
+ * Number of PS-Poll to send before STA wakes up
+ *
+ * (see enum wmi_sta_ps_param_pspoll_count)
+ *
+ */
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+
+ /*
+ * TX/RX inactivity time in msec before going to sleep.
+ *
+ * The power save SM will monitor tx/rx activity on the VDEV; if there
+ * is no activity for the specified number of msec, the power save SM
+ * will go to sleep.
+ */
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+
+ /*
+ * Set uapsd configuration.
+ *
+ * (see enum wmi_sta_ps_param_uapsd)
+ */
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+struct wmi_sta_powersave_param_cmd {
+ __le32 vdev_id;
+ __le32 param_id; /* %WMI_STA_PS_PARAM_ */
+ __le32 param_value;
+} __packed;
+
+/* No MIMO power save */
+#define WMI_STA_MIMO_PS_MODE_DISABLE
+/* mimo powersave mode static */
+#define WMI_STA_MIMO_PS_MODE_STATIC
+/* mimo powersave mode dynamic */
+#define WMI_STA_MIMO_PS_MODE_DYNAMIC
+
+struct wmi_sta_mimo_ps_mode_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* mimo powersave mode as defined above */
+ __le32 mimo_pwrsave_mode;
+} __packed;
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
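On the AP side, the per-peer U-APSD bits and maximum service period are programmed with ath10k_wmi_set_ap_ps_param(), declared near the end of this header. Note how the bit macro lines up with the enum: WMI_UAPSD_AC_BIT_MASK(3, WMI_UAPSD_AC_TYPE_TRIG) expands to bit 7, which is WMI_AP_PS_UAPSD_AC3_TRIGGER_EN. A minimal sketch with a hypothetical vdev 0 and peer address:

/* Illustrative sketch only: mark AC3 (voice) as delivery- and
 * trigger-enabled for one peer and allow a max service period of 2. */
static int example_ap_ps_setup(struct ath10k *ar, const u8 *peer_addr)
{
	u32 uapsd = WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
		    WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
	int ret;

	ret = ath10k_wmi_set_ap_ps_param(ar, 0, peer_addr,
					 WMI_AP_PS_PEER_PARAM_UAPSD, uapsd);
	if (ret)
		return ret;

	return ath10k_wmi_set_ap_ps_param(ar, 0, peer_addr,
					  WMI_AP_PS_PEER_PARAM_MAX_SP,
					  WMI_AP_PS_PEER_PARAM_MAX_SP_2);
}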
+/*
+ * AP power save parameter
+ * Set a power save specific parameter for a peer station
+ */
+enum wmi_ap_ps_peer_param {
+ /* Set uapsd configuration for a given peer.
+ *
+ * Include the delivery and trigger enabled state for every AC.
+ * The host MLME needs to set this based on the AP capability and the
+ * station's request, as set in the association request received from
+ * the station.
+ *
+ * The lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /*
+ * Set the service period for a UAPSD capable station
+ *
+ * The service period from the wme ie in the (re)assoc request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /* Time in seconds for aging out buffered frames for STA in PS */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+};
+
+struct wmi_ap_ps_peer_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* AP powersave param (see enum wmi_ap_ps_peer_param) */
+ __le32 param_id;
+
+ /* AP powersave param value */
+ __le32 param_value;
+} __packed;
+
+/* 128 clients = 4 words */
+#define WMI_TIM_BITMAP_ARRAY_SIZE 4
+
+struct wmi_tim_info {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ __le32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE];
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
+/* Maximum number of NOA Descriptors supported */
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+#define WMI_P2P_OPPPS_ENABLE_BIT BIT(0)
+#define WMI_P2P_OPPPS_CTWINDOW_OFFSET 1
+#define WMI_P2P_NOA_CHANGED_BIT BIT(0)
+
+struct wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ Bits 7-1 - Reserved */
+ u8 changed;
+ /* NOA index */
+ u8 index;
+ /* Bit 0 - Opp PS state of the AP
+ Bits 1-7 - Ctwindow in TUs */
+ u8 ctwindow_oppps;
+ /* Number of NOA descriptors */
+ u8 num_descriptors;
+
+ struct wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_bcn_info {
+ struct wmi_tim_info tim_info;
+ struct wmi_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_bcn_info bcn_info[1];
+} __packed;
+
+#define WMI_MAX_AP_VDEV 16
+
+struct wmi_tbtt_offset_event {
+ __le32 vdev_map;
+ __le32 tbttoffset_list[WMI_MAX_AP_VDEV];
+} __packed;
+
+
+struct wmi_peer_create_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_fixed_rate {
+ /*
+ * rate mode. 0: disable fixed rate (auto rate)
+ * 1: legacy (non 11n) rate specified as ieee rate 2*Mbps
+ * 2: ht20 11n rate specified as mcs index
+ * 3: ht40 11n rate specified as mcs index
+ */
+ __le32 rate_mode;
+ /*
+ * 4 rate values for 4 rate series. series 0 is stored in byte 0 (LSB)
+ * and series 3 is stored at byte 3 (MSB)
+ */
+ __le32 rate_series;
+ /*
+ * 4 retry counts for 4 rate series. The retry count for rate 0 is
+ * stored in byte 0 (LSB) and the retry count for rate 3 is stored
+ * at byte 3 (MSB)
+ */
+ __le32 rate_retries;
+} __packed;
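Both rate_series and rate_retries pack four 8-bit values LSB-first into one little-endian 32-bit word, per the comments above. A minimal sketch of that packing (the helper name is hypothetical, not from the patch):

/* Illustrative sketch only: pack four per-series bytes into the
 * little-endian layout described above (series 0 in byte 0, series 3
 * in byte 3). */
static inline __le32 example_pack_series(u8 s0, u8 s1, u8 s2, u8 s3)
{
	return __cpu_to_le32(s0 | (s1 << 8) | (s2 << 16) | ((u32)s3 << 24));
}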
+struct wmi_peer_fixed_rate_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* fixed rate */
+ struct wmi_fixed_rate peer_fixed_rate;
+} __packed;
+
+#define WMI_MGMT_TID 17
+
+struct wmi_addba_clear_resp_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* Buffer/Window size */
+ __le32 buffersize;
+} __packed;
+
+struct wmi_delba_send_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* Is Initiator */
+ __le32 initiator;
+ /* Reason code */
+ __le32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ /* unique id identifying the vdev, generated by the caller */
+ __le32 vdev_id;
+ /* peer mac address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* status code */
+ __le32 statuscode;
+} __packed;
+
+struct wmi_send_singleamsdu_cmd {
+ /* unique id identifying the vdev, generated by the caller */
+ __le32 vdev_id;
+ /* peer mac address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+} __packed;
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0x0,
+ WMI_PEER_SMPS_STATIC = 0x1,
+ WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_param {
+ WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+ WMI_PEER_AMPDU = 0x2,
+ WMI_PEER_AUTHORIZE = 0x3,
+ WMI_PEER_CHAN_WIDTH = 0x4,
+ WMI_PEER_NSS = 0x5,
+ WMI_PEER_USE_4ADDR = 0x6
+};
+
+struct wmi_peer_set_param_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+struct wmi_rate_set {
+ /* total number of rates */
+ __le32 num_rates;
+ /*
+ * rates (each 8bit value) packed into a 32 bit word.
+ * the rates are filled from least significant byte to most
+ * significant byte.
+ */
+ __le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+} __packed;
+
+struct wmi_rate_set_arg {
+ unsigned int num_rates;
+ u8 rates[MAX_SUPPORTED_RATES];
+};
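Converting the host-order wmi_rate_set_arg into the on-the-wire wmi_rate_set follows the byte layout described in the comment above. A minimal sketch (the helper name is hypothetical, not from the patch):

/* Illustrative sketch only: pack the 8-bit rate codes four per word,
 * least significant byte first, as the wmi_rate_set comment describes. */
static void example_pack_rate_set(struct wmi_rate_set *dst,
				  const struct wmi_rate_set_arg *src)
{
	u32 word = 0;
	unsigned int i;

	memset(dst, 0, sizeof(*dst));
	dst->num_rates = __cpu_to_le32(src->num_rates);

	for (i = 0; i < src->num_rates && i < MAX_SUPPORTED_RATES; i++) {
		word |= (u32)src->rates[i] << (8 * (i % 4));
		if ((i % 4) == 3 || i == src->num_rates - 1) {
			dst->rates[i / 4] = __cpu_to_le32(word);
			word = 0;
		}
	}
}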
+/*
+ * NOTE: It would be a good idea to represent the Tx MCS
+ * info in one word and Rx in another word. This is split
+ * into multiple words for convenience
+ */
+struct wmi_vht_rate_set {
+ __le32 rx_max_rate; /* Max Rx data rate */
+ __le32 rx_mcs_set; /* Negotiated RX VHT rates */
+ __le32 tx_max_rate; /* Max Tx data rate */
+ __le32 tx_mcs_set; /* Negotiated TX VHT rates */
+} __packed;
+
+struct wmi_vht_rate_set_arg {
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+};
+
+struct wmi_peer_set_rates_cmd {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* legacy rate set */
+ struct wmi_rate_set peer_legacy_rates;
+ /* ht rate set */
+ struct wmi_rate_set peer_ht_rates;
+} __packed;
+
+struct wmi_peer_set_q_empty_callback_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ __le32 callback_enable;
+} __packed;
+
+#define WMI_PEER_AUTH 0x00000001
+#define WMI_PEER_QOS 0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_APSD 0x00000800
+#define WMI_PEER_HT 0x00001000
+#define WMI_PEER_40MHZ 0x00002000
+#define WMI_PEER_STBC 0x00008000
+#define WMI_PEER_LDPC 0x00010000
+#define WMI_PEER_DYN_MIMOPS 0x00020000
+#define WMI_PEER_STATIC_MIMOPS 0x00040000
+#define WMI_PEER_SPATIAL_MUX 0x00200000
+#define WMI_PEER_VHT 0x02000000
+#define WMI_PEER_80MHZ 0x04000000
+#define WMI_PEER_PMF 0x08000000
+
+/*
+ * Peer rate capabilities.
+ *
+ * This is of interest to the ratecontrol
+ * module which resides in the firmware. The bit definitions are
+ * consistent with those defined in if_athrate.c.
+ */
+#define WMI_RC_DS_FLAG 0x01
+#define WMI_RC_CW40_FLAG 0x02
+#define WMI_RC_SGI_FLAG 0x04
+#define WMI_RC_HT_FLAG 0x08
+#define WMI_RC_RTSCTS_FLAG 0x10
+#define WMI_RC_TX_STBC_FLAG 0x20
+#define WMI_RC_RX_STBC_FLAG 0xC0
+#define WMI_RC_RX_STBC_FLAG_S 6
+#define WMI_RC_WEP_TKIP_FLAG 0x100
+#define WMI_RC_TS_FLAG 0x200
+#define WMI_RC_UAPSD_FLAG 0x400
+
+/* Maximum listen interval supported by hw in units of beacon interval */
+#define ATH10K_MAX_HW_LISTEN_INTERVAL 5
+
+struct wmi_peer_assoc_complete_cmd {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 vdev_id;
+ __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
+ __le32 peer_associd; /* 16 LSBs */
+ __le32 peer_flags;
+ __le32 peer_caps; /* 16 LSBs */
+ __le32 peer_listen_intval;
+ __le32 peer_ht_caps;
+ __le32 peer_max_mpdu;
+ __le32 peer_mpdu_density; /* 0..16 */
+ __le32 peer_rate_caps;
+ struct wmi_rate_set peer_legacy_rates;
+ struct wmi_rate_set peer_ht_rates;
+ __le32 peer_nss; /* num of spatial streams */
+ __le32 peer_vht_caps;
+ __le32 peer_phymode;
+ struct wmi_vht_rate_set peer_vht_rates;
+ /* HT Operation Element of the peer. Five bytes packed into a
+ * 2-word (INT32) array and filled from LSB to MSB.
*/ + __le32 peer_ht_info[2]; +} __packed; + +struct wmi_peer_assoc_complete_arg { + u8 addr[ETH_ALEN]; + u32 vdev_id; + bool peer_reassoc; + u16 peer_aid; + u32 peer_flags; /* see %WMI_PEER_ */ + u16 peer_caps; + u32 peer_listen_intval; + u32 peer_ht_caps; + u32 peer_max_mpdu; + u32 peer_mpdu_density; /* 0..16 */ + u32 peer_rate_caps; /* see %WMI_RC_ */ + struct wmi_rate_set_arg peer_legacy_rates; + struct wmi_rate_set_arg peer_ht_rates; + u32 peer_num_spatial_streams; + u32 peer_vht_caps; + enum wmi_phy_mode peer_phymode; + struct wmi_vht_rate_set_arg peer_vht_rates; +}; + +struct wmi_peer_add_wds_entry_cmd { + /* peer MAC address */ + struct wmi_mac_addr peer_macaddr; + /* wds MAC addr */ + struct wmi_mac_addr wds_macaddr; +} __packed; + +struct wmi_peer_remove_wds_entry_cmd { + /* wds MAC addr */ + struct wmi_mac_addr wds_macaddr; +} __packed; + +struct wmi_peer_q_empty_callback_event { + /* peer MAC address */ + struct wmi_mac_addr peer_macaddr; +} __packed; + +/* + * Channel info WMI event + */ +struct wmi_chan_info_event { + __le32 err_code; + __le32 freq; + __le32 cmd_flags; + __le32 noise_floor; + __le32 rx_clear_count; + __le32 cycle_count; +} __packed; + +/* Beacon filter wmi command info */ +#define BCN_FLT_MAX_SUPPORTED_IES 256 +#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32) + +struct bss_bcn_stats { + __le32 vdev_id; + __le32 bss_bcnsdropped; + __le32 bss_bcnsdelivered; +} __packed; + +struct bcn_filter_stats { + __le32 bcns_dropped; + __le32 bcns_delivered; + __le32 activefilters; + struct bss_bcn_stats bss_stats; +} __packed; + +struct wmi_add_bcn_filter_cmd { + u32 vdev_id; + u32 ie_map[BCN_FLT_MAX_ELEMS_IE_LIST]; +} __packed; + +enum wmi_sta_keepalive_method { + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1, + WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2, +}; + +/* note: ip4 addresses are in network byte order, i.e. 
big endian */ +struct wmi_sta_keepalive_arp_resp { + __be32 src_ip4_addr; + __be32 dest_ip4_addr; + struct wmi_mac_addr dest_mac_addr; +} __packed; + +struct wmi_sta_keepalive_cmd { + __le32 vdev_id; + __le32 enabled; + __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */ + __le32 interval; /* in seconds */ + struct wmi_sta_keepalive_arp_resp arp_resp; +} __packed; + +#define ATH10K_RTS_MAX 2347 +#define ATH10K_FRAGMT_THRESHOLD_MIN 540 +#define ATH10K_FRAGMT_THRESHOLD_MAX 2346 + +#define WMI_MAX_EVENT 0x1000 +/* Maximum number of pending TXed WMI packets */ +#define WMI_MAX_PENDING_TX_COUNT 128 +#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr) + +/* By default disable power save for IBSS */ +#define ATH10K_DEFAULT_ATIM 0 + +struct ath10k; +struct ath10k_vif; + +int ath10k_wmi_attach(struct ath10k *ar); +void ath10k_wmi_detach(struct ath10k *ar); +int ath10k_wmi_wait_for_service_ready(struct ath10k *ar); +int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar); +void ath10k_wmi_flush_tx(struct ath10k *ar); + +int ath10k_wmi_connect_htc_service(struct ath10k *ar); +int ath10k_wmi_pdev_set_channel(struct ath10k *ar, + const struct wmi_channel_arg *); +int ath10k_wmi_pdev_suspend_target(struct ath10k *ar); +int ath10k_wmi_pdev_resume_target(struct ath10k *ar); +int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, + u16 rd5g, u16 ctl2g, u16 ctl5g); +int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, + u32 value); +int ath10k_wmi_cmd_init(struct ath10k *ar); +int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *); +void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *); +int ath10k_wmi_stop_scan(struct ath10k *ar, + const struct wmi_stop_scan_arg *arg); +int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, + enum wmi_vdev_type type, + enum wmi_vdev_subtype subtype, + const u8 macaddr[ETH_ALEN]); +int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id); +int ath10k_wmi_vdev_start(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *); +int ath10k_wmi_vdev_restart(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *); +int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id); +int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, + const u8 *bssid); +int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id); +int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, + enum wmi_vdev_param param_id, u32 param_value); +int ath10k_wmi_vdev_install_key(struct ath10k *ar, + const struct wmi_vdev_install_key_arg *arg); +int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN]); +int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN]); +int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, + const u8 peer_addr[ETH_ALEN], u32 tid_bitmap); +int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, + const u8 *peer_addr, + enum wmi_peer_param param_id, u32 param_value); +int ath10k_wmi_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg); +int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_ps_mode psmode); +int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, + enum wmi_sta_powersave_param param_id, + u32 value); +int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, + enum wmi_ap_ps_peer_param param_id, u32 value); +int ath10k_wmi_scan_chan_list(struct ath10k *ar, + const struct wmi_scan_chan_list_arg *arg); +int 
ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+ const struct wmi_pdev_set_wmm_params_arg *arg);
+int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
+
+#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 7f702fe3ecc2..ce67ab791eae 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -60,6 +60,7 @@
 #include <asm/unaligned.h>
+#include <net/mac80211.h>
 #include "base.h"
 #include "reg.h"
 #include "debug.h"
@@ -666,9 +667,46 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
 return htype;
 }
+static struct ieee80211_rate *
+ath5k_get_rate(const struct ieee80211_hw *hw,
+ const struct ieee80211_tx_info *info,
+ struct ath5k_buf *bf, int idx)
+{
+ /*
+ * convert an ieee80211_tx_rate RC-table entry to
+ * the respective ieee80211_rate struct
+ */
+ if (bf->rates[idx].idx < 0)
+ return NULL;
+
+ return &hw->wiphy->bands[info->band]->bitrates[bf->rates[idx].idx];
+}
+
+static u16
+ath5k_get_rate_hw_value(const struct ieee80211_hw *hw,
+ const struct ieee80211_tx_info *info,
+ struct ath5k_buf *bf, int idx)
+{
+ struct ieee80211_rate *rate;
+ u16 hw_rate;
+ u8 rc_flags;
+
+ rate = ath5k_get_rate(hw, info, bf, idx);
+ if (!rate)
+ return 0;
+
+ rc_flags = bf->rates[idx].flags;
+ hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
+ rate->hw_value_short : rate->hw_value;
+
+ return hw_rate;
+}
+
 static int
 ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
- struct ath5k_txq *txq, int padsize)
+ struct ath5k_txq *txq, int padsize,
+ struct ieee80211_tx_control *control)
 {
 struct ath5k_desc *ds = bf->desc;
 struct sk_buff *skb = bf->skb;
@@ -688,7 +726,11 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
 bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len, DMA_TO_DEVICE);
- rate = ieee80211_get_tx_rate(ah->hw, info);
+ ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL,
+ skb, bf->rates, ARRAY_SIZE(bf->rates));
+
+ rate = ath5k_get_rate(ah->hw, info, bf, 0);
+
 if (!rate) {
 ret = -EINVAL;
 goto err_unmap;
@@ -698,8 +740,8 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
 flags |= AR5K_TXDESC_NOACK;
 rc_flags = info->control.rates[0].flags;
- hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
- rate->hw_value_short : rate->hw_value; + + hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0); pktlen = skb->len; @@ -722,12 +764,13 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw, info->control.vif, pktlen, info)); } + ret = ah->ah_setup_tx_desc(ah, ds, pktlen, ieee80211_get_hdrlen_from_skb(skb), padsize, get_hw_packet_type(skb), (ah->ah_txpower.txp_requested * 2), hw_rate, - info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, + bf->rates[0].count, keyidx, ah->ah_tx_ant, flags, cts_rate, duration); if (ret) goto err_unmap; @@ -736,13 +779,15 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, if (ah->ah_capabilities.cap_has_mrr_support) { memset(mrr_rate, 0, sizeof(mrr_rate)); memset(mrr_tries, 0, sizeof(mrr_tries)); + for (i = 0; i < 3; i++) { - rate = ieee80211_get_alt_retry_rate(ah->hw, info, i); + + rate = ath5k_get_rate(ah->hw, info, bf, i); if (!rate) break; - mrr_rate[i] = rate->hw_value; - mrr_tries[i] = info->control.rates[i + 1].count; + mrr_rate[i] = ath5k_get_rate_hw_value(ah->hw, info, bf, i); + mrr_tries[i] = bf->rates[i].count; } ath5k_hw_setup_mrr_tx_desc(ah, ds, @@ -1515,7 +1560,7 @@ unlock: void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ath5k_txq *txq) + struct ath5k_txq *txq, struct ieee80211_tx_control *control) { struct ath5k_hw *ah = hw->priv; struct ath5k_buf *bf; @@ -1555,7 +1600,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, bf->skb = skb; - if (ath5k_txbuf_setup(ah, bf, txq, padsize)) { + if (ath5k_txbuf_setup(ah, bf, txq, padsize, control)) { bf->skb = NULL; spin_lock_irqsave(&ah->txbuflock, flags); list_add_tail(&bf->list, &ah->txbuf); @@ -1571,11 +1616,13 @@ drop_packet: static void ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb, - struct ath5k_txq *txq, struct ath5k_tx_status *ts) + struct ath5k_txq *txq, struct ath5k_tx_status *ts, + struct ath5k_buf *bf) { struct ieee80211_tx_info *info; u8 tries[3]; int i; + int size = 0; ah->stats.tx_all_count++; ah->stats.tx_bytes_count += skb->len; @@ -1587,6 +1634,9 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb, ieee80211_tx_info_clear_status(info); + size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates)); + memcpy(info->status.rates, bf->rates, size); + for (i = 0; i < ts->ts_final_idx; i++) { struct ieee80211_tx_rate *r = &info->status.rates[i]; @@ -1663,7 +1713,7 @@ ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq) dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE); - ath5k_tx_frame_completed(ah, skb, txq, &ts); + ath5k_tx_frame_completed(ah, skb, txq, &ts, bf); } /* @@ -1917,7 +1967,7 @@ ath5k_beacon_send(struct ath5k_hw *ah) skb = ieee80211_get_buffered_bc(ah->hw, vif); while (skb) { - ath5k_tx_queue(ah->hw, skb, ah->cabq); + ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL); if (ah->cabq->txq_len >= ah->cabq->txq_max) break; @@ -2442,7 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_MFP_CAPABLE | - IEEE80211_HW_REPORTS_TX_ACK_STATUS; + IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_SUPPORTS_RC_TABLE; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h index 6c94c7ff2350..ca9a83ceeee1 100644 --- a/drivers/net/wireless/ath/ath5k/base.h +++ b/drivers/net/wireless/ath/ath5k/base.h @@ -47,6 
+47,7 @@ struct ath5k_hw; struct ath5k_txq; struct ieee80211_channel; struct ath_bus_ops; +struct ieee80211_tx_control; enum nl80211_iftype; enum ath5k_srev_type { @@ -61,11 +62,12 @@ struct ath5k_srev_name { }; struct ath5k_buf { - struct list_head list; - struct ath5k_desc *desc; /* virtual addr of desc */ - dma_addr_t daddr; /* physical addr of desc */ - struct sk_buff *skb; /* skbuff for buf */ - dma_addr_t skbaddr;/* physical addr of skb data */ + struct list_head list; + struct ath5k_desc *desc; /* virtual addr of desc */ + dma_addr_t daddr; /* physical addr of desc */ + struct sk_buff *skb; /* skbuff for buf */ + dma_addr_t skbaddr; /* physical addr of skb data */ + struct ieee80211_tx_rate rates[4]; /* number of multi-rate stages */ }; struct ath5k_vif { @@ -103,7 +105,7 @@ int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan); void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ath5k_txq *txq); + struct ath5k_txq *txq, struct ieee80211_tx_control *control); const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val); diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 06f86f435711..81b686c6a376 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -66,7 +66,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, return; } - ath5k_tx_queue(hw, skb, &ah->txqs[qnum]); + ath5k_tx_queue(hw, skb, &ah->txqs[qnum], control); } diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 5c9736a94e54..2437ad26949d 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -3175,10 +3175,21 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, { struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); struct ath6kl *ar = ath6kl_priv(vif->ndev); - u32 id; + u32 id, freq; const struct ieee80211_mgmt *mgmt; bool more_data, queued; + /* default to the current channel, but use the one specified as argument + * if any + */ + freq = vif->ch_hint; + if (chan) + freq = chan->center_freq; + + /* never send freq zero to the firmware */ + if (WARN_ON(freq == 0)) + return -EINVAL; + mgmt = (const struct ieee80211_mgmt *) buf; if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && ieee80211_is_probe_resp(mgmt->frame_control) && @@ -3188,8 +3199,7 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, * command to allow the target to fill in the generic IEs. 
*/ *cookie = 0; /* TX status not supported */ - return ath6kl_send_go_probe_resp(vif, buf, len, - chan->center_freq); + return ath6kl_send_go_probe_resp(vif, buf, len, freq); } id = vif->send_action_id++; @@ -3205,17 +3215,14 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, /* AP mode Power saving processing */ if (vif->nw_type == AP_NETWORK) { - queued = ath6kl_mgmt_powersave_ap(vif, - id, chan->center_freq, - wait, buf, - len, &more_data, no_cck); + queued = ath6kl_mgmt_powersave_ap(vif, id, freq, wait, buf, len, + &more_data, no_cck); if (queued) return 0; } - return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, - chan->center_freq, wait, - buf, len, no_cck); + return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, freq, + wait, buf, len, no_cck); } static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, @@ -3679,6 +3686,20 @@ err: return NULL; } +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support ath6kl_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_4WAY_HANDSHAKE, + .n_patterns = WOW_MAX_FILTERS_PER_LIST, + .pattern_min_len = 1, + .pattern_max_len = WOW_PATTERN_SIZE, +}; +#endif + int ath6kl_cfg80211_init(struct ath6kl *ar) { struct wiphy *wiphy = ar->wiphy; @@ -3772,15 +3793,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); #ifdef CONFIG_PM - wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_GTK_REKEY_FAILURE | - WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_4WAY_HANDSHAKE; - wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST; - wiphy->wowlan.pattern_min_len = 1; - wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE; + wiphy->wowlan = &ath6kl_wowlan_support; #endif wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS; diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c index fe38b836cb26..dbfd17d0a5fa 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.c +++ b/drivers/net/wireless/ath/ath6kl/debug.c @@ -1240,20 +1240,14 @@ static ssize_t ath6kl_force_roam_write(struct file *file, char buf[20]; size_t len; u8 bssid[ETH_ALEN]; - int i; - int addr[ETH_ALEN]; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; - if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x", - &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) - != ETH_ALEN) + if (!mac_pton(buf, bssid)) return -EINVAL; - for (i = 0; i < ETH_ALEN; i++) - bssid[i] = addr[i]; ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid); if (ret) diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 40ffee6184fd..6a67881f94d6 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1696,10 +1696,16 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar) test_bit(WMI_READY, &ar->flag), WMI_TIMEOUT); + if (timeleft <= 0) { + clear_bit(WMI_READY, &ar->flag); + ath6kl_err("wmi is not ready or wait was interrupted: %ld\n", + timeleft); + ret = -EIO; + goto err_htc_stop; + } ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n"); - if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) { ath6kl_info("%s %s fw %s api %d%s\n", ar->hw.name, @@ -1718,12 +1724,6 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar) goto err_htc_stop; } - if (!timeleft || 
signal_pending(current)) { - ath6kl_err("wmi is not ready or wait was interrupted\n"); - ret = -EIO; - goto err_htc_stop; - } - ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__); /* communicate the wmi protocol verision to the target */ diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index fb141454c6d2..7126bdd4236c 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -345,17 +345,17 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, { struct hif_scatter_req *s_req; struct bus_request *bus_req; - int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz; + int i, scat_req_sz, scat_list_sz, size; u8 *virt_buf; scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); scat_req_sz = sizeof(*s_req) + scat_list_sz; if (!virt_scat) - sg_sz = sizeof(struct scatterlist) * n_scat_entry; + size = sizeof(struct scatterlist) * n_scat_entry; else - buf_sz = 2 * L1_CACHE_BYTES + - ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; + size = 2 * L1_CACHE_BYTES + + ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; for (i = 0; i < n_scat_req; i++) { /* allocate the scatter request */ @@ -364,7 +364,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, return -ENOMEM; if (virt_scat) { - virt_buf = kzalloc(buf_sz, GFP_KERNEL); + virt_buf = kzalloc(size, GFP_KERNEL); if (!virt_buf) { kfree(s_req); return -ENOMEM; @@ -374,7 +374,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf); } else { /* allocate sglist */ - s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL); + s_req->sgentries = kzalloc(size, GFP_KERNEL); if (!s_req->sgentries) { kfree(s_req); diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index bed0d337712d..f38ff6a6255e 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -1061,6 +1061,22 @@ static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar) return; } +static int ath6kl_usb_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) +{ + /* + * cfg80211 suspend/WOW currently not supported for USB. + */ + return 0; +} + +static int ath6kl_usb_resume(struct ath6kl *ar) +{ + /* + * cfg80211 resume currently not supported for USB. 
+ */ + return 0; +} + static const struct ath6kl_hif_ops ath6kl_usb_ops = { .diag_read32 = ath6kl_usb_diag_read32, .diag_write32 = ath6kl_usb_diag_write32, @@ -1074,6 +1090,8 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = { .pipe_map_service = ath6kl_usb_map_service_pipe, .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number, .cleanup_scatter = ath6kl_usb_cleanup_scatter, + .suspend = ath6kl_usb_suspend, + .resume = ath6kl_usb_resume, }; /* ath6kl usb driver registered functions */ @@ -1152,7 +1170,7 @@ static void ath6kl_usb_remove(struct usb_interface *interface) #ifdef CONFIG_PM -static int ath6kl_usb_suspend(struct usb_interface *interface, +static int ath6kl_usb_pm_suspend(struct usb_interface *interface, pm_message_t message) { struct ath6kl_usb *device; @@ -1162,7 +1180,7 @@ static int ath6kl_usb_suspend(struct usb_interface *interface, return 0; } -static int ath6kl_usb_resume(struct usb_interface *interface) +static int ath6kl_usb_pm_resume(struct usb_interface *interface) { struct ath6kl_usb *device; device = usb_get_intfdata(interface); @@ -1175,7 +1193,7 @@ static int ath6kl_usb_resume(struct usb_interface *interface) return 0; } -static int ath6kl_usb_reset_resume(struct usb_interface *intf) +static int ath6kl_usb_pm_reset_resume(struct usb_interface *intf) { if (usb_get_intfdata(intf)) ath6kl_usb_remove(intf); @@ -1184,9 +1202,9 @@ static int ath6kl_usb_reset_resume(struct usb_interface *intf) #else -#define ath6kl_usb_suspend NULL -#define ath6kl_usb_resume NULL -#define ath6kl_usb_reset_resume NULL +#define ath6kl_usb_pm_suspend NULL +#define ath6kl_usb_pm_resume NULL +#define ath6kl_usb_pm_reset_resume NULL #endif @@ -1201,9 +1219,9 @@ MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids); static struct usb_driver ath6kl_usb_driver = { .name = "ath6kl_usb", .probe = ath6kl_usb_probe, - .suspend = ath6kl_usb_suspend, - .resume = ath6kl_usb_resume, - .reset_resume = ath6kl_usb_reset_resume, + .suspend = ath6kl_usb_pm_suspend, + .resume = ath6kl_usb_pm_resume, + .reset_resume = ath6kl_usb_pm_reset_resume, .disconnect = ath6kl_usb_remove, .id_table = ath6kl_usb_ids, .supports_autosuspend = true, diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 3c2cbc9d6295..760ab3fe09e2 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -84,14 +84,6 @@ config ATH9K_DFS_CERTIFIED developed. At this point enabling this option won't do anything except increase code size. 
-config ATH9K_MAC_DEBUG - bool "Atheros MAC statistics" - depends on ATH9K_DEBUGFS - default y - ---help--- - This option enables collection of statistics for Rx/Tx status - data and some other MAC related statistics - config ATH9K_LEGACY_RATE_CONTROL bool "Atheros ath9k rate control" depends on ATH9K diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 7ecd40f07a74..4994bea809eb 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -46,8 +46,8 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = { { 5, 4, 1 }, /* lvl 5 */ { 6, 5, 1 }, /* lvl 6 */ { 7, 6, 1 }, /* lvl 7 */ - { 7, 6, 0 }, /* lvl 8 */ - { 7, 7, 0 } /* lvl 9 */ + { 7, 7, 1 }, /* lvl 8 */ + { 7, 8, 0 } /* lvl 9 */ }; #define ATH9K_ANI_OFDM_NUM_LEVEL \ ARRAY_SIZE(ofdm_level_table) @@ -91,8 +91,8 @@ static const struct ani_cck_level_entry cck_level_table[] = { { 4, 0 }, /* lvl 4 */ { 5, 0 }, /* lvl 5 */ { 6, 0 }, /* lvl 6 */ - { 6, 0 }, /* lvl 7 (only for high rssi) */ - { 7, 0 } /* lvl 8 (only for high rssi) */ + { 7, 0 }, /* lvl 7 (only for high rssi) */ + { 8, 0 } /* lvl 8 (only for high rssi) */ }; #define ATH9K_ANI_CCK_NUM_LEVEL \ @@ -118,10 +118,10 @@ static void ath9k_ani_restart(struct ath_hw *ah) { struct ar5416AniState *aniState; - if (!DO_ANI(ah)) + if (!ah->curchan) return; - aniState = &ah->curchan->ani; + aniState = &ah->ani; aniState->listenTime = 0; ENABLE_REGWRITE_BUFFER(ah); @@ -143,7 +143,7 @@ static void ath9k_ani_restart(struct ath_hw *ah) static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel, bool scan) { - struct ar5416AniState *aniState = &ah->curchan->ani; + struct ar5416AniState *aniState = &ah->ani; struct ath_common *common = ath9k_hw_common(ah); const struct ani_ofdm_level_entry *entry_ofdm; const struct ani_cck_level_entry *entry_cck; @@ -177,10 +177,15 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel, BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH) weak_sig = true; - if (aniState->ofdmWeakSigDetect != weak_sig) - ath9k_hw_ani_control(ah, - ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, - entry_ofdm->ofdm_weak_signal_on); + /* + * OFDM Weak signal detection is always enabled for AP mode. 
+ */ + if (ah->opmode != NL80211_IFTYPE_AP && + aniState->ofdmWeakSigDetect != weak_sig) { + ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + entry_ofdm->ofdm_weak_signal_on); + } if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) { ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; @@ -195,10 +200,10 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) { struct ar5416AniState *aniState; - if (!DO_ANI(ah)) + if (!ah->curchan) return; - aniState = &ah->curchan->ani; + aniState = &ah->ani; if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL) ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false); @@ -210,7 +215,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel, bool scan) { - struct ar5416AniState *aniState = &ah->curchan->ani; + struct ar5416AniState *aniState = &ah->ani; struct ath_common *common = ath9k_hw_common(ah); const struct ani_ofdm_level_entry *entry_ofdm; const struct ani_cck_level_entry *entry_cck; @@ -251,10 +256,10 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) { struct ar5416AniState *aniState; - if (!DO_ANI(ah)) + if (!ah->curchan) return; - aniState = &ah->curchan->ani; + aniState = &ah->ani; if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL) ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1, @@ -269,7 +274,7 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) { struct ar5416AniState *aniState; - aniState = &ah->curchan->ani; + aniState = &ah->ani; /* lower OFDM noise immunity */ if (aniState->ofdmNoiseImmunityLevel > 0 && @@ -292,12 +297,12 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) */ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) { - struct ar5416AniState *aniState = &ah->curchan->ani; + struct ar5416AniState *aniState = &ah->ani; struct ath9k_channel *chan = ah->curchan; struct ath_common *common = ath9k_hw_common(ah); int ofdm_nil, cck_nil; - if (!DO_ANI(ah)) + if (!ah->curchan) return; BUG_ON(aniState == NULL); @@ -363,24 +368,13 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning); ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning); - /* - * enable phy counters if hw supports or if not, enable phy - * interrupts (so we can count each one) - */ ath9k_ani_restart(ah); - - ENABLE_REGWRITE_BUFFER(ah); - - REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); - REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); - - REGWRITE_BUFFER_FLUSH(ah); } static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - struct ar5416AniState *aniState = &ah->curchan->ani; + struct ar5416AniState *aniState = &ah->ani; u32 phyCnt1, phyCnt2; int32_t listenTime; @@ -415,10 +409,10 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan) struct ath_common *common = ath9k_hw_common(ah); u32 ofdmPhyErrRate, cckPhyErrRate; - if (!DO_ANI(ah)) + if (!ah->curchan) return; - aniState = &ah->curchan->ani; + aniState = &ah->ani; if (!ath9k_hw_ani_read_counters(ah)) return; @@ -490,32 +484,22 @@ EXPORT_SYMBOL(ath9k_hw_disable_mib_counters); void ath9k_hw_ani_init(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - int i; + struct ar5416AniState *ani = &ah->ani; ath_dbg(common, ANI, "Initialize ANI\n"); ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW; - 
ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH; ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW; - for (i = 0; i < ARRAY_SIZE(ah->channels); i++) { - struct ath9k_channel *chan = &ah->channels[i]; - struct ar5416AniState *ani = &chan->ani; - - ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; - - ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; - - ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false; - - ani->ofdmsTurn = true; - - ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG; - ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; - ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL; - } + ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; + ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; + ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false; + ani->ofdmsTurn = true; + ani->ofdmWeakSigDetect = true; + ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; + ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL; /* * since we expect some ongoing maintenance on the tables, let's sanity @@ -524,9 +508,6 @@ void ath9k_hw_ani_init(struct ath_hw *ah) ah->aniperiod = ATH9K_ANI_PERIOD; ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL; - if (ah->config.enable_ani) - ah->proc_phyerr |= HAL_PROCESS_ANI; - ath9k_ani_restart(ah); ath9k_enable_mib_counters(ah); } diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h index dddb1361039a..b54a3fb01883 100644 --- a/drivers/net/wireless/ath/ath9k/ani.h +++ b/drivers/net/wireless/ath/ath9k/ani.h @@ -17,32 +17,19 @@ #ifndef ANI_H #define ANI_H -#define HAL_PROCESS_ANI 0x00000001 - -#define DO_ANI(ah) (((ah)->proc_phyerr & HAL_PROCESS_ANI) && ah->curchan) - #define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) /* units are errors per second */ -#define ATH9K_ANI_OFDM_TRIG_HIGH 3500 +#define ATH9K_ANI_OFDM_TRIG_HIGH 3500 #define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000 -/* units are errors per second */ #define ATH9K_ANI_OFDM_TRIG_LOW 400 #define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900 -/* units are errors per second */ #define ATH9K_ANI_CCK_TRIG_HIGH 600 - -/* units are errors per second */ #define ATH9K_ANI_CCK_TRIG_LOW 300 -#define ATH9K_ANI_NOISE_IMMUNE_LVL 4 -#define ATH9K_ANI_USE_OFDM_WEAK_SIG true -#define ATH9K_ANI_CCK_WEAK_SIG_THR false - #define ATH9K_ANI_SPUR_IMMUNE_LVL 3 - #define ATH9K_ANI_FIRSTEP_LVL 2 #define ATH9K_ANI_RSSI_THR_HIGH 40 @@ -53,10 +40,6 @@ /* in ms */ #define ATH9K_ANI_POLLINTERVAL 1000 -#define HAL_NOISE_IMMUNE_MAX 4 -#define HAL_SPUR_IMMUNE_MAX 7 -#define HAL_FIRST_STEP_MAX 2 - #define ATH9K_SIG_FIRSTEP_SETTING_MIN 0 #define ATH9K_SIG_FIRSTEP_SETTING_MAX 20 #define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0 @@ -111,7 +94,7 @@ struct ar5416AniState { u8 mrcCCK; u8 spurImmunityLevel; u8 firstepLevel; - u8 ofdmWeakSigDetect; + bool ofdmWeakSigDetect; u32 listenTime; u32 ofdmPhyErrCount; u32 cckPhyErrCount; @@ -119,8 +102,6 @@ struct ar5416AniState { }; struct ar5416Stats { - u32 ast_ani_niup; - u32 ast_ani_nidown; u32 ast_ani_spurup; u32 ast_ani_spurdown; u32 ast_ani_ofdmon; diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 391da5ad6a99..d1acfe98918a 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -931,7 +931,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_channel *chan = ah->curchan; - struct ar5416AniState *aniState = &chan->ani; + struct ar5416AniState *aniState = &ah->ani; s32 value, value2; 
switch (cmd & ah->ani_function) { @@ -1207,7 +1207,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_channel *chan = ah->curchan; - struct ar5416AniState *aniState = &chan->ani; + struct ar5416AniState *aniState = &ah->ani; struct ath9k_ani_default *iniDef; u32 val; @@ -1251,7 +1251,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah) /* these levels just got reset to defaults by the INI */ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; - aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG; + aniState->ofdmWeakSigDetect = true; aniState->mrcCCK = false; /* not available on pre AR9003 */ } diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index 830daa12feb6..8dc2d089cdef 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -38,10 +38,6 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah) else INIT_INI_ARRAY(&ah->iniPcieSerdes, ar9280PciePhy_clkreq_always_on_L1_9280); -#ifdef CONFIG_PM_SLEEP - INIT_INI_ARRAY(&ah->iniPcieSerdesWow, - ar9280PciePhy_awow); -#endif if (AR_SREV_9287_11_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h index beb6162cf97c..4d18c66a6790 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h @@ -925,20 +925,6 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = { {0x00004044, 0x00000000}, }; -static const u32 ar9280PciePhy_awow[][2] = { - /* Addr allmodes */ - {0x00004040, 0x9248fd00}, - {0x00004040, 0x24924924}, - {0x00004040, 0xa8000019}, - {0x00004040, 0x13160820}, - {0x00004040, 0xe5980560}, - {0x00004040, 0xc01dcffd}, - {0x00004040, 0x1aaabe41}, - {0x00004040, 0xbe105554}, - {0x00004040, 0x00043007}, - {0x00004044, 0x00000000}, -}; - static const u32 ar9285Modes_9285_1_2[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160}, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index e6b92ff265fd..eae23b910bce 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3563,14 +3563,24 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) { struct ath9k_hw_capabilities *pCap = &ah->caps; int chain; - u32 regval; + u32 regval, value, gpio; static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = { AR_PHY_SWITCH_CHAIN_0, AR_PHY_SWITCH_CHAIN_1, AR_PHY_SWITCH_CHAIN_2, }; - u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz); + if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0)) { + if (ah->config.xlna_gpio) + gpio = ah->config.xlna_gpio; + else + gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485; + + ath9k_hw_cfg_output(ah, gpio, + AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED); + } + + value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz); if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, @@ -3796,7 +3806,13 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan) REG_RMW_FIELD(ah, ext_atten_reg[i], AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value); - value = ar9003_hw_atten_chain_get_margin(ah, i, chan); + if (AR_SREV_9485(ah) && + (ar9003_hw_get_rx_gain_idx(ah) == 0) && + 
ah->config.xatten_margin_cfg) + value = 5; + else + value = ar9003_hw_atten_chain_get_margin(ah, i, chan); + REG_RMW_FIELD(ah, ext_atten_reg[i], AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, value); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 301bf72c53bf..5163abd3937c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -469,6 +469,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_status = 0; rxs->rs_flags = 0; + rxs->flag = 0; rxs->rs_datalen = rxsp->status2 & AR_DataLen; rxs->rs_tstamp = rxsp->status3; @@ -493,8 +494,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0; rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0; rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7); - rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0; - rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0; + rxs->flag |= (rxsp->status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0; + rxs->flag |= (rxsp->status4 & AR_2040) ? RX_FLAG_40MHZ : 0; rxs->evm0 = rxsp->status6; rxs->evm1 = rxsp->status7; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c index 09c1f9da67a0..6343cc91953e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -454,6 +454,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain) if (accum_cnt <= thresh_accum_cnt) continue; + max_index++; + /* sum(tx amplitude) */ accum_tx = ((data_L[i] >> 16) & 0xffff) | ((data_U[i] & 0x7ff) << 16); @@ -468,20 +470,21 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain) accum_tx <<= scale_factor; accum_rx <<= scale_factor; - x_est[i + 1] = (((accum_tx + accum_cnt) / accum_cnt) + 32) >> - scale_factor; + x_est[max_index] = + (((accum_tx + accum_cnt) / accum_cnt) + 32) >> + scale_factor; - Y[i + 1] = ((((accum_rx + accum_cnt) / accum_cnt) + 32) >> + Y[max_index] = + ((((accum_rx + accum_cnt) / accum_cnt) + 32) >> scale_factor) + - (1 << scale_factor) * max_index + 16; + (1 << scale_factor) * i + 16; if (accum_ang >= (1 << 26)) accum_ang -= 1 << 27; - theta[i + 1] = ((accum_ang * (1 << scale_factor)) + accum_cnt) / - accum_cnt; - - max_index++; + theta[max_index] = + ((accum_ang * (1 << scale_factor)) + accum_cnt) / + accum_cnt; } /* diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index e1714d7c9eeb..bc483128efcd 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -905,7 +905,12 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_channel *chan = ah->curchan; - struct ar5416AniState *aniState = &chan->ani; + struct ar5416AniState *aniState = &ah->ani; + int m1ThreshLow, m2ThreshLow; + int m1Thresh, m2Thresh; + int m2CountThr, m2CountThrLow; + int m1ThreshLowExt, m2ThreshLowExt; + int m1ThreshExt, m2ThreshExt; s32 value, value2; switch (cmd & ah->ani_function) { @@ -919,6 +924,61 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, */ u32 on = param ? 1 : 0; + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) + goto skip_ws_det; + + m1ThreshLow = on ? + aniState->iniDef.m1ThreshLow : m1ThreshLow_off; + m2ThreshLow = on ? 
+ aniState->iniDef.m2ThreshLow : m2ThreshLow_off; + m1Thresh = on ? + aniState->iniDef.m1Thresh : m1Thresh_off; + m2Thresh = on ? + aniState->iniDef.m2Thresh : m2Thresh_off; + m2CountThr = on ? + aniState->iniDef.m2CountThr : m2CountThr_off; + m2CountThrLow = on ? + aniState->iniDef.m2CountThrLow : m2CountThrLow_off; + m1ThreshLowExt = on ? + aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off; + m2ThreshLowExt = on ? + aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off; + m1ThreshExt = on ? + aniState->iniDef.m1ThreshExt : m1ThreshExt_off; + m2ThreshExt = on ? + aniState->iniDef.m2ThreshExt : m2ThreshExt_off; + + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M1_THRESH_LOW, + m1ThreshLow); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M2_THRESH_LOW, + m2ThreshLow); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M1_THRESH, + m1Thresh); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M2_THRESH, + m2Thresh); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M2COUNT_THR, + m2CountThr); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, + m2CountThrLow); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M1_THRESH_LOW, + m1ThreshLowExt); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M2_THRESH_LOW, + m2ThreshLowExt); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M1_THRESH, + m1ThreshExt); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M2_THRESH, + m2ThreshExt); +skip_ws_det: if (on) REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); @@ -1173,7 +1233,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah) struct ath9k_ani_default *iniDef; u32 val; - aniState = &ah->curchan->ani; + aniState = &ah->ani; iniDef = &aniState->iniDef; ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", @@ -1214,7 +1274,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah) /* these levels just got reset to defaults by the INI */ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; - aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG; + aniState->ofdmWeakSigDetect = true; aniState->mrcCCK = true; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index e71774196c01..5013c731f9f6 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -351,6 +351,8 @@ #define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118 +#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9 + /* * AGC Field Definitions */ diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 42b03dc39d14..74965eefbd1c 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -296,6 +296,7 @@ struct ath_tx { struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; struct ath_descdma txdma; struct ath_txq *txq_map[IEEE80211_NUM_ACS]; + struct ath_txq *uapsdq; u32 txq_max_pending[IEEE80211_NUM_ACS]; u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32]; }; @@ -343,6 +344,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum, void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop); int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, struct ath_tx_control *txctl); +void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct sk_buff *skb); void ath_tx_tasklet(struct ath_softc *sc); void ath_tx_edma_tasklet(struct ath_softc *sc); int ath_tx_aggr_start(struct ath_softc 
*sc, struct ieee80211_sta *sta, @@ -353,6 +356,11 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an); void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, struct ath_node *an); +void ath9k_release_buffered_frames(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u16 tids, int nframes, + enum ieee80211_frame_release_type reason, + bool more_data); /********/ /* VIFs */ @@ -623,6 +631,8 @@ void ath_ant_comb_update(struct ath_softc *sc); /* Main driver core */ /********************/ +#define ATH9K_PCI_CUS198 0x0001 + /* * Default cache line size, in bytes. * Used when PCI device not fully initialized by bootrom/BIOS @@ -642,6 +652,7 @@ enum sc_op_flags { SC_OP_ANI_RUN, SC_OP_PRIM_STA_VIF, SC_OP_HW_RESET, + SC_OP_SCANNING, }; /* Powersave flags */ @@ -706,6 +717,7 @@ struct ath_softc { unsigned int hw_busy_count; unsigned long sc_flags; + unsigned long driver_data; u32 intrstatus; u16 ps_flags; /* PS_* */ @@ -755,7 +767,6 @@ struct ath_softc { struct rchan *rfs_chan_spec_scan; enum spectral_mode spectral_mode; struct ath_spec_scan spec_config; - int scanning; #ifdef CONFIG_PM_SLEEP atomic_t wow_got_bmiss_intr; diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 2ff570f7f8ff..1a17732bb089 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -39,7 +39,8 @@ static void ath9k_beaconq_config(struct ath_softc *sc) ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi); - if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { + if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || + sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) { /* Always burst out beacon and CAB traffic. 
*/ qi.tqi_aifs = 1; qi.tqi_cwmin = 0; @@ -107,23 +108,6 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif, ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); } -static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct ath_softc *sc = hw->priv; - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_tx_control txctl; - - memset(&txctl, 0, sizeof(struct ath_tx_control)); - txctl.txq = sc->beacon.cabq; - - ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb); - - if (ath_tx_start(hw, skb, &txctl) != 0) { - ath_dbg(common, XMIT, "CABQ TX failed\n"); - ieee80211_free_txskb(hw, skb); - } -} - static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -205,10 +189,8 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx); - while (skb) { - ath9k_tx_cabq(hw, skb); - skb = ieee80211_get_buffered_bc(hw, vif); - } + if (skb) + ath_tx_cabq(hw, vif, skb); return bf; } @@ -273,7 +255,8 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc) u64 tsf; int slot; - if (sc->sc_ah->opmode != NL80211_IFTYPE_AP) { + if (sc->sc_ah->opmode != NL80211_IFTYPE_AP && + sc->sc_ah->opmode != NL80211_IFTYPE_MESH_POINT) { ath_dbg(common, BEACON, "slot 0, tsf: %llu\n", ath9k_hw_gettsf64(sc->sc_ah)); return 0; @@ -765,10 +748,10 @@ void ath9k_set_beacon(struct ath_softc *sc) switch (sc->sc_ah->opmode) { case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: ath9k_beacon_config_ap(sc, cur_conf); break; case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_MESH_POINT: ath9k_beacon_config_adhoc(sc, cur_conf); break; case NL80211_IFTYPE_STATION: diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 7304e7585009..5e8219a91e25 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -387,7 +387,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) if (!caldata) { chan->noisefloor = nf; - ah->noise = ath9k_hw_getchan_noise(ah, chan); return false; } diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index b37eb8d38811..2721f52f0177 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -173,25 +173,69 @@ static const struct file_operations fops_rx_chainmask = { .llseek = default_llseek, }; -static ssize_t read_file_disable_ani(struct file *file, char __user *user_buf, +static ssize_t read_file_ani(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - char buf[32]; - unsigned int len; + struct ath_hw *ah = sc->sc_ah; + unsigned int len = 0, size = 1024; + ssize_t retval = 0; + char *buf; - len = sprintf(buf, "%d\n", common->disable_ani); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + if (common->disable_ani) { + len += snprintf(buf + len, size - len, "%s: %s\n", + "ANI", "DISABLED"); + goto exit; + } + + len += snprintf(buf + len, size - len, "%15s: %s\n", + "ANI", "ENABLED"); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "ANI RESET", ah->stats.ast_ani_reset); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "SPUR UP", ah->stats.ast_ani_spurup); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "SPUR 
DOWN", ah->stats.ast_ani_spurup); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "MRC-CCK ON", ah->stats.ast_ani_ccklow); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "MRC-CCK OFF", ah->stats.ast_ani_cckhigh); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "FIR-STEP UP", ah->stats.ast_ani_stepup); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "FIR-STEP DOWN", ah->stats.ast_ani_stepdown); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "CCK ERRORS", ah->stats.ast_ani_cckerrs); +exit: + if (len > size) + len = size; + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; } -static ssize_t write_file_disable_ani(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) +static ssize_t write_file_ani(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - unsigned long disable_ani; + unsigned long ani; char buf[32]; ssize_t len; @@ -200,12 +244,15 @@ static ssize_t write_file_disable_ani(struct file *file, return -EFAULT; buf[len] = '\0'; - if (strict_strtoul(buf, 0, &disable_ani)) + if (strict_strtoul(buf, 0, &ani)) return -EINVAL; - common->disable_ani = !!disable_ani; + if (ani < 0 || ani > 1) + return -EINVAL; + + common->disable_ani = !ani; - if (disable_ani) { + if (common->disable_ani) { clear_bit(SC_OP_ANI_RUN, &sc->sc_flags); ath_stop_ani(sc); } else { @@ -215,9 +262,9 @@ static ssize_t write_file_disable_ani(struct file *file, return count; } -static const struct file_operations fops_disable_ani = { - .read = read_file_disable_ani, - .write = write_file_disable_ani, +static const struct file_operations fops_ani = { + .read = read_file_ani, + .write = write_file_ani, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, @@ -738,8 +785,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, struct ath_txq *txq, unsigned int flags) { -#define TX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].ts\ - [sc->debug.tsidx].c) int qnum = txq->axq_qnum; TX_STAT_INC(qnum, tx_pkts_all); @@ -771,37 +816,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, TX_STAT_INC(qnum, data_underrun); if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN) TX_STAT_INC(qnum, delim_underrun); - -#ifdef CONFIG_ATH9K_MAC_DEBUG - spin_lock(&sc->debug.samp_lock); - TX_SAMP_DBG(jiffies) = jiffies; - TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0; - TX_SAMP_DBG(rssi_ctl1) = ts->ts_rssi_ctl1; - TX_SAMP_DBG(rssi_ctl2) = ts->ts_rssi_ctl2; - TX_SAMP_DBG(rssi_ext0) = ts->ts_rssi_ext0; - TX_SAMP_DBG(rssi_ext1) = ts->ts_rssi_ext1; - TX_SAMP_DBG(rssi_ext2) = ts->ts_rssi_ext2; - TX_SAMP_DBG(rateindex) = ts->ts_rateindex; - TX_SAMP_DBG(isok) = !!(ts->ts_status & ATH9K_TXERR_MASK); - TX_SAMP_DBG(rts_fail_cnt) = ts->ts_shortretry; - TX_SAMP_DBG(data_fail_cnt) = ts->ts_longretry; - TX_SAMP_DBG(rssi) = ts->ts_rssi; - TX_SAMP_DBG(tid) = ts->tid; - TX_SAMP_DBG(qid) = ts->qid; - - if (ts->ts_flags & ATH9K_TX_BA) { - 
TX_SAMP_DBG(ba_low) = ts->ba_low; - TX_SAMP_DBG(ba_high) = ts->ba_high; - } else { - TX_SAMP_DBG(ba_low) = 0; - TX_SAMP_DBG(ba_high) = 0; - } - - sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES; - spin_unlock(&sc->debug.samp_lock); -#endif - -#undef TX_SAMP_DBG } static const struct file_operations fops_xmit = { @@ -915,8 +929,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) { #define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ -#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\ - [sc->debug.rsidx].c) RX_STAT_INC(rx_pkts_all); sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen; @@ -940,27 +952,7 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) RX_PHY_ERR_INC(rs->rs_phyerr); } -#ifdef CONFIG_ATH9K_MAC_DEBUG - spin_lock(&sc->debug.samp_lock); - RX_SAMP_DBG(jiffies) = jiffies; - RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0; - RX_SAMP_DBG(rssi_ctl1) = rs->rs_rssi_ctl1; - RX_SAMP_DBG(rssi_ctl2) = rs->rs_rssi_ctl2; - RX_SAMP_DBG(rssi_ext0) = rs->rs_rssi_ext0; - RX_SAMP_DBG(rssi_ext1) = rs->rs_rssi_ext1; - RX_SAMP_DBG(rssi_ext2) = rs->rs_rssi_ext2; - RX_SAMP_DBG(antenna) = rs->rs_antenna; - RX_SAMP_DBG(rssi) = rs->rs_rssi; - RX_SAMP_DBG(rate) = rs->rs_rate; - RX_SAMP_DBG(is_mybeacon) = rs->is_mybeacon; - - sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES; - spin_unlock(&sc->debug.samp_lock); - -#endif - #undef RX_PHY_ERR_INC -#undef RX_SAMP_DBG } static const struct file_operations fops_recv = { @@ -1485,283 +1477,6 @@ static const struct file_operations fops_modal_eeprom = { .llseek = default_llseek, }; -#ifdef CONFIG_ATH9K_MAC_DEBUG - -void ath9k_debug_samp_bb_mac(struct ath_softc *sc) -{ -#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c) - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - unsigned long flags; - int i; - - ath9k_ps_wakeup(sc); - - spin_lock_bh(&sc->debug.samp_lock); - - spin_lock_irqsave(&common->cc_lock, flags); - ath_hw_cycle_counters_update(common); - - ATH_SAMP_DBG(cc.cycles) = common->cc_ani.cycles; - ATH_SAMP_DBG(cc.rx_busy) = common->cc_ani.rx_busy; - ATH_SAMP_DBG(cc.rx_frame) = common->cc_ani.rx_frame; - ATH_SAMP_DBG(cc.tx_frame) = common->cc_ani.tx_frame; - spin_unlock_irqrestore(&common->cc_lock, flags); - - ATH_SAMP_DBG(noise) = ah->noise; - - REG_WRITE_D(ah, AR_MACMISC, - ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | - (AR_MACMISC_MISC_OBS_BUS_1 << - AR_MACMISC_MISC_OBS_BUS_MSB_S))); - - for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) - ATH_SAMP_DBG(dma_dbg_reg_vals[i]) = REG_READ_D(ah, - AR_DMADBG_0 + (i * sizeof(u32))); - - ATH_SAMP_DBG(pcu_obs) = REG_READ_D(ah, AR_OBS_BUS_1); - ATH_SAMP_DBG(pcu_cr) = REG_READ_D(ah, AR_CR); - - memcpy(ATH_SAMP_DBG(nfCalHist), sc->caldata.nfCalHist, - sizeof(ATH_SAMP_DBG(nfCalHist))); - - sc->debug.sampidx = (sc->debug.sampidx + 1) % ATH_DBG_MAX_SAMPLES; - spin_unlock_bh(&sc->debug.samp_lock); - ath9k_ps_restore(sc); - -#undef ATH_SAMP_DBG -} - -static int open_file_bb_mac_samps(struct inode *inode, struct file *file) -{ -#define ATH_SAMP_DBG(c) bb_mac_samp[sampidx].c - struct ath_softc *sc = inode->i_private; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ieee80211_conf *conf = &common->hw->conf; - struct ath_dbg_bb_mac_samp *bb_mac_samp; - struct ath9k_nfcal_hist *h; - int i, j, qcuOffset = 0, dcuOffset = 0; - u32 *qcuBase, *dcuBase, size = 
30000, len = 0; - u32 sampidx = 0; - u8 *buf; - u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; - u8 nread; - - if (test_bit(SC_OP_INVALID, &sc->sc_flags)) - return -EAGAIN; - - buf = vmalloc(size); - if (!buf) - return -ENOMEM; - bb_mac_samp = vmalloc(sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES); - if (!bb_mac_samp) { - vfree(buf); - return -ENOMEM; - } - /* Account the current state too */ - ath9k_debug_samp_bb_mac(sc); - - spin_lock_bh(&sc->debug.samp_lock); - memcpy(bb_mac_samp, sc->debug.bb_mac_samp, - sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES); - len += snprintf(buf + len, size - len, - "Current Sample Index: %d\n", sc->debug.sampidx); - spin_unlock_bh(&sc->debug.samp_lock); - - len += snprintf(buf + len, size - len, - "Raw DMA Debug Dump:\n"); - len += snprintf(buf + len, size - len, "Sample |\t"); - for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) - len += snprintf(buf + len, size - len, " DMA Reg%d |\t", i); - len += snprintf(buf + len, size - len, "\n"); - - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - len += snprintf(buf + len, size - len, "%d\t", sampidx); - - for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) - len += snprintf(buf + len, size - len, " %08x\t", - ATH_SAMP_DBG(dma_dbg_reg_vals[i])); - len += snprintf(buf + len, size - len, "\n"); - } - len += snprintf(buf + len, size - len, "\n"); - - len += snprintf(buf + len, size - len, - "Sample Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n"); - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]); - dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]); - - for (i = 0; i < ATH9K_NUM_QUEUES; i++, - qcuOffset += 4, dcuOffset += 5) { - if (i == 8) { - qcuOffset = 0; - qcuBase++; - } - - if (i == 6) { - dcuOffset = 0; - dcuBase++; - } - if (!sc->debug.stats.txstats[i].queued) - continue; - - len += snprintf(buf + len, size - len, - "%4d %7d %2x %1x %2x %2x\n", - sampidx, i, - (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, - (*qcuBase & (0x8 << qcuOffset)) >> - (qcuOffset + 3), - ATH_SAMP_DBG(dma_dbg_reg_vals[2]) & - (0x7 << (i * 3)) >> (i * 3), - (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); - } - len += snprintf(buf + len, size - len, "\n"); - } - len += snprintf(buf + len, size - len, - "samp qcu_sh qcu_fh qcu_comp dcu_comp dcu_arb dcu_fp " - "ch_idle_dur ch_idle_dur_val txfifo_val0 txfifo_val1 " - "txfifo_dcu0 txfifo_dcu1 pcu_obs AR_CR\n"); - - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]); - dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]); - - len += snprintf(buf + len, size - len, "%4d %5x %5x ", sampidx, - (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x003c0000) >> 18, - (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x03c00000) >> 22); - len += snprintf(buf + len, size - len, "%7x %8x ", - (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x1c000000) >> 26, - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x3)); - len += snprintf(buf + len, size - len, "%7x %7x ", - (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x06000000) >> 25, - (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x38000000) >> 27); - len += snprintf(buf + len, size - len, "%7d %12d ", - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x000003fc) >> 2, - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000400) >> 10); - len += snprintf(buf + len, size - len, "%12d %12d ", - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000800) >> 11, - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00001000) >> 12); - len += snprintf(buf + len, size - len, "%12d %12d ", - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x0001e000) >> 13, - 
(ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x001e0000) >> 17); - len += snprintf(buf + len, size - len, "0x%07x 0x%07x\n", - ATH_SAMP_DBG(pcu_obs), ATH_SAMP_DBG(pcu_cr)); - } - - len += snprintf(buf + len, size - len, - "Sample ChNoise Chain privNF #Reading Readings\n"); - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - h = ATH_SAMP_DBG(nfCalHist); - if (!ATH_SAMP_DBG(noise)) - continue; - - for (i = 0; i < NUM_NF_READINGS; i++) { - if (!(chainmask & (1 << i)) || - ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))) - continue; - - nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - - h[i].invalidNFcount; - len += snprintf(buf + len, size - len, - "%4d %5d %4d\t %d\t %d\t", - sampidx, ATH_SAMP_DBG(noise), - i, h[i].privNF, nread); - for (j = 0; j < nread; j++) - len += snprintf(buf + len, size - len, - " %d", h[i].nfCalBuffer[j]); - len += snprintf(buf + len, size - len, "\n"); - } - } - len += snprintf(buf + len, size - len, "\nCycle counters:\n" - "Sample Total Rxbusy Rxframes Txframes\n"); - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - if (!ATH_SAMP_DBG(cc.cycles)) - continue; - len += snprintf(buf + len, size - len, - "%4d %08x %08x %08x %08x\n", - sampidx, ATH_SAMP_DBG(cc.cycles), - ATH_SAMP_DBG(cc.rx_busy), - ATH_SAMP_DBG(cc.rx_frame), - ATH_SAMP_DBG(cc.tx_frame)); - } - - len += snprintf(buf + len, size - len, "Tx status Dump :\n"); - len += snprintf(buf + len, size - len, - "Sample rssi:- ctl0 ctl1 ctl2 ext0 ext1 ext2 comb " - "isok rts_fail data_fail rate tid qid " - "ba_low ba_high tx_before(ms)\n"); - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) { - if (!ATH_SAMP_DBG(ts[i].jiffies)) - continue; - len += snprintf(buf + len, size - len, "%-14d" - "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-4d %-8d " - "%-9d %-4d %-3d %-3d %08x %08x %-11d\n", - sampidx, - ATH_SAMP_DBG(ts[i].rssi_ctl0), - ATH_SAMP_DBG(ts[i].rssi_ctl1), - ATH_SAMP_DBG(ts[i].rssi_ctl2), - ATH_SAMP_DBG(ts[i].rssi_ext0), - ATH_SAMP_DBG(ts[i].rssi_ext1), - ATH_SAMP_DBG(ts[i].rssi_ext2), - ATH_SAMP_DBG(ts[i].rssi), - ATH_SAMP_DBG(ts[i].isok), - ATH_SAMP_DBG(ts[i].rts_fail_cnt), - ATH_SAMP_DBG(ts[i].data_fail_cnt), - ATH_SAMP_DBG(ts[i].rateindex), - ATH_SAMP_DBG(ts[i].tid), - ATH_SAMP_DBG(ts[i].qid), - ATH_SAMP_DBG(ts[i].ba_low), - ATH_SAMP_DBG(ts[i].ba_high), - jiffies_to_msecs(jiffies - - ATH_SAMP_DBG(ts[i].jiffies))); - } - } - - len += snprintf(buf + len, size - len, "Rx status Dump :\n"); - len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 " - "ext0 ext1 ext2 comb beacon ant rate rx_before(ms)\n"); - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) { - if (!ATH_SAMP_DBG(rs[i].jiffies)) - continue; - len += snprintf(buf + len, size - len, "%-14d" - "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-9s %-2d %02x %-13d\n", - sampidx, - ATH_SAMP_DBG(rs[i].rssi_ctl0), - ATH_SAMP_DBG(rs[i].rssi_ctl1), - ATH_SAMP_DBG(rs[i].rssi_ctl2), - ATH_SAMP_DBG(rs[i].rssi_ext0), - ATH_SAMP_DBG(rs[i].rssi_ext1), - ATH_SAMP_DBG(rs[i].rssi_ext2), - ATH_SAMP_DBG(rs[i].rssi), - ATH_SAMP_DBG(rs[i].is_mybeacon) ? 
- "True" : "False", - ATH_SAMP_DBG(rs[i].antenna), - ATH_SAMP_DBG(rs[i].rate), - jiffies_to_msecs(jiffies - - ATH_SAMP_DBG(rs[i].jiffies))); - } - } - - vfree(bb_mac_samp); - file->private_data = buf; - - return 0; -#undef ATH_SAMP_DBG -} - -static const struct file_operations fops_samps = { - .open = open_file_bb_mac_samps, - .read = ath9k_debugfs_read_buf, - .release = ath9k_debugfs_release_buf, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -#endif - #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT static ssize_t read_file_btcoex(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -2059,8 +1774,8 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy, sc, &fops_rx_chainmask); debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_tx_chainmask); - debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, &fops_disable_ani); + debugfs_create_file("ani", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, sc, &fops_ani); debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, &sc->sc_ah->config.enable_paprd); debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, @@ -2095,11 +1810,6 @@ int ath9k_init_debug(struct ath_hw *ah) debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_spectral_fft_period); - -#ifdef CONFIG_ATH9K_MAC_DEBUG - debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc, - &fops_samps); -#endif debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 9d49aab8b989..fc679198a0f3 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -251,56 +251,10 @@ struct ath_stats { u32 reset[__RESET_TYPE_MAX]; }; -#define ATH_DBG_MAX_SAMPLES 10 -struct ath_dbg_bb_mac_samp { - u32 dma_dbg_reg_vals[ATH9K_NUM_DMA_DEBUG_REGS]; - u32 pcu_obs, pcu_cr, noise; - struct { - u64 jiffies; - int8_t rssi_ctl0; - int8_t rssi_ctl1; - int8_t rssi_ctl2; - int8_t rssi_ext0; - int8_t rssi_ext1; - int8_t rssi_ext2; - int8_t rssi; - bool isok; - u8 rts_fail_cnt; - u8 data_fail_cnt; - u8 rateindex; - u8 qid; - u8 tid; - u32 ba_low; - u32 ba_high; - } ts[ATH_DBG_MAX_SAMPLES]; - struct { - u64 jiffies; - int8_t rssi_ctl0; - int8_t rssi_ctl1; - int8_t rssi_ctl2; - int8_t rssi_ext0; - int8_t rssi_ext1; - int8_t rssi_ext2; - int8_t rssi; - bool is_mybeacon; - u8 antenna; - u8 rate; - } rs[ATH_DBG_MAX_SAMPLES]; - struct ath_cycle_counters cc; - struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; -}; - struct ath9k_debug { struct dentry *debugfs_phy; u32 regidx; struct ath_stats stats; -#ifdef CONFIG_ATH9K_MAC_DEBUG - spinlock_t samp_lock; - struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES]; - u8 sampidx; - u8 tsidx; - u8 rsidx; -#endif }; int ath9k_init_debug(struct ath_hw *ah); @@ -364,17 +318,4 @@ static inline void ath_debug_stat_rx(struct ath_softc *sc, #endif /* CONFIG_ATH9K_DEBUGFS */ -#ifdef CONFIG_ATH9K_MAC_DEBUG - -void ath9k_debug_samp_bb_mac(struct ath_softc *sc); - -#else - -static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc) -{ -} - -#endif - - #endif /* DEBUG_H */ diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index d3b099d7898b..69581031f2cd 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -142,6 
+142,7 @@ struct ath9k_htc_target_aggr { #define WLAN_RC_40_FLAG 0x02 #define WLAN_RC_SGI_FLAG 0x04 #define WLAN_RC_HT_FLAG 0x08 +#define ATH_RC_TX_STBC_FLAG 0x20 struct ath9k_htc_rateset { u8 rs_nrates; @@ -208,6 +209,9 @@ struct ath9k_htc_target_rx_stats { case NL80211_IFTYPE_AP: \ _priv->num_ap_vif++; \ break; \ + case NL80211_IFTYPE_MESH_POINT: \ + _priv->num_mbss_vif++; \ + break; \ default: \ break; \ } \ @@ -224,6 +228,9 @@ struct ath9k_htc_target_rx_stats { case NL80211_IFTYPE_AP: \ _priv->num_ap_vif--; \ break; \ + case NL80211_IFTYPE_MESH_POINT: \ + _priv->num_mbss_vif--; \ + break; \ default: \ break; \ } \ @@ -450,6 +457,7 @@ struct ath9k_htc_priv { u8 sta_slot; u8 vif_sta_pos[ATH9K_HTC_MAX_VIF]; u8 num_ibss_vif; + u8 num_mbss_vif; u8 num_sta_vif; u8 num_sta_assoc_vif; u8 num_ap_vif; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index f13f458dd656..e0c03bd64182 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -28,7 +28,8 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv) ath9k_hw_get_txq_props(ah, priv->beaconq, &qi); - if (priv->ah->opmode == NL80211_IFTYPE_AP) { + if (priv->ah->opmode == NL80211_IFTYPE_AP || + priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) { qi.tqi_aifs = 1; qi.tqi_cwmin = 0; qi.tqi_cwmax = 0; @@ -628,6 +629,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, case NL80211_IFTYPE_ADHOC: ath9k_htc_beacon_config_adhoc(priv, cur_conf); break; + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: ath9k_htc_beacon_config_ap(priv, cur_conf); break; @@ -649,6 +651,7 @@ void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv) case NL80211_IFTYPE_ADHOC: ath9k_htc_beacon_config_adhoc(priv, cur_conf); break; + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: ath9k_htc_beacon_config_ap(priv, cur_conf); break; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index a47f5e05fc04..bb0ba9e3e083 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -517,6 +517,9 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv, ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n", tx_streams, rx_streams); + if (tx_streams >= 2) + ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; + if (tx_streams != rx_streams) { ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; ht_info->mcs.tx_params |= ((tx_streams - 1) << @@ -698,7 +701,8 @@ static const struct ieee80211_iface_limit if_limits[] = { { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) }, { .max = 2, .types = BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO) }, + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_MESH_POINT) }, }; static const struct ieee80211_iface_combination if_comb = { @@ -721,6 +725,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_MFP_CAPABLE | IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; hw->wiphy->interface_modes = @@ -728,7 +733,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_CLIENT); + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_MESH_POINT); hw->wiphy->iface_combinations = &if_comb; hw->wiphy->n_iface_combinations = 1; diff --git 
a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 0743a47cef8f..eaa94feb4333 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -113,7 +113,9 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) struct ath9k_htc_priv *priv = data; struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; - if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon) + if ((vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_MESH_POINT) && + bss_conf->enable_beacon) priv->reconfig_beacon = true; if (bss_conf->assoc) { @@ -180,6 +182,8 @@ static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv) priv->ah->opmode = NL80211_IFTYPE_ADHOC; else if (priv->num_ap_vif) priv->ah->opmode = NL80211_IFTYPE_AP; + else if (priv->num_mbss_vif) + priv->ah->opmode = NL80211_IFTYPE_MESH_POINT; else priv->ah->opmode = NL80211_IFTYPE_STATION; @@ -623,6 +627,8 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv, trate->rates.ht_rates.rs_nrates = j; caps = WLAN_RC_HT_FLAG; + if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) + caps |= ATH_RC_TX_STBC_FLAG; if (sta->ht_cap.mcs.rx_mask[1]) caps |= WLAN_RC_DS_FLAG; if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && @@ -810,8 +816,7 @@ void ath9k_htc_ani_work(struct work_struct *work) } /* Verify whether we must check ANI */ - if (ah->config.enable_ani && - (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { + if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { aniflag = true; common->ani.checkani_timer = timestamp; } @@ -841,8 +846,7 @@ set_timer: * short calibration and long calibration. */ cal_interval = ATH_LONG_CALINTERVAL; - if (ah->config.enable_ani) - cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); + cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); if (!common->ani.caldone) cal_interval = min(cal_interval, (u32)short_cal_interval); @@ -1052,6 +1056,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, case NL80211_IFTYPE_AP: hvif.opmode = HTC_M_HOSTAP; break; + case NL80211_IFTYPE_MESH_POINT: + hvif.opmode = HTC_M_WDS; /* close enough */ + break; default: ath_err(common, "Interface type %d not yet supported\n", vif->type); @@ -1084,6 +1091,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, INC_VIF(priv, vif->type); if ((vif->type == NL80211_IFTYPE_AP) || + (vif->type == NL80211_IFTYPE_MESH_POINT) || (vif->type == NL80211_IFTYPE_ADHOC)) ath9k_htc_assign_bslot(priv, vif); @@ -1134,6 +1142,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, DEC_VIF(priv, vif->type); if ((vif->type == NL80211_IFTYPE_AP) || + vif->type == NL80211_IFTYPE_MESH_POINT || (vif->type == NL80211_IFTYPE_ADHOC)) ath9k_htc_remove_bslot(priv, vif); @@ -1525,9 +1534,10 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) { /* * Disable SWBA interrupt only if there are no - * AP/IBSS interfaces. + * concurrent AP/mesh or IBSS interfaces. */ - if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) { + if ((priv->num_ap_vif + priv->num_mbss_vif <= 1) || + priv->num_ibss_vif) { ath_dbg(common, CONFIG, "Beacon disabled for BSS: %pM\n", bss_conf->bssid); @@ -1538,12 +1548,15 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON_INT) { /* - * Reset the HW TSF for the first AP interface. 
+ * Reset the HW TSF for the first AP or mesh interface. */ - if ((priv->ah->opmode == NL80211_IFTYPE_AP) && - (priv->nvifs == 1) && - (priv->num_ap_vif == 1) && - (vif->type == NL80211_IFTYPE_AP)) { + if (priv->nvifs == 1 && + ((priv->ah->opmode == NL80211_IFTYPE_AP && + vif->type == NL80211_IFTYPE_AP && + priv->num_ap_vif == 1) || + (priv->ah->opmode == NL80211_IFTYPE_MESH_POINT && + vif->type == NL80211_IFTYPE_MESH_POINT && + priv->num_mbss_vif == 1))) { set_bit(OP_TSF_RESET, &priv->op_flags); } ath_dbg(common, CONFIG, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 6bd0e92ea2aa..e602c9519709 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -887,7 +887,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv) if (priv->rxfilter & FIF_PSPOLL) rfilt |= ATH9K_RX_FILTER_PSPOLL; - if (priv->nvifs > 1) + if (priv->nvifs > 1 || priv->rxfilter & FIF_OTHER_BSS) rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; return rfilt; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 15dfefcf2d0f..ca9d9cd14ddb 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -452,7 +452,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah) ah->config.pcie_clock_req = 0; ah->config.pcie_waen = 0; ah->config.analog_shiftreg = 1; - ah->config.enable_ani = true; for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { ah->config.spurchans[i][0] = AR_NO_SPUR; @@ -549,8 +548,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah) ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah)); - if (ah->config.enable_ani) - ath9k_hw_ani_init(ah); + ath9k_hw_ani_init(ah); return 0; } @@ -1250,10 +1248,10 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) switch (opmode) { case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_MESH_POINT: set |= AR_STA_ID1_ADHOC; REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); break; + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: set |= AR_STA_ID1_STA_AP; /* fall through */ @@ -1872,7 +1870,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ah->caldata = caldata; if (caldata && (chan->channel != caldata->channel || - chan->channelFlags != caldata->channelFlags)) { + chan->channelFlags != caldata->channelFlags || + chan->chanmode != caldata->chanmode)) { /* Operating channel changed, reset channel calibration data */ memset(caldata, 0, sizeof(*caldata)); ath9k_init_nfcal_hist_buffer(ah, chan); @@ -2255,12 +2254,12 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) switch (ah->opmode) { case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_MESH_POINT: REG_SET_BIT(ah, AR_TXCFG, AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon + TU_TO_USEC(ah->atim_window ? 
ah->atim_window : 1)); flags |= AR_NDP_TIMER_EN; + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon); REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon - @@ -2604,13 +2603,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->hw_caps |= ATH9K_HW_CAP_RTT; } - if (AR_SREV_9280_20_OR_LATER(ah)) { - pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE | - ATH9K_HW_WOW_PATTERN_MATCH_EXACT; - - if (AR_SREV_9280(ah)) - pCap->hw_caps |= ATH9K_HW_WOW_PATTERN_MATCH_DWORD; - } + if (AR_SREV_9462(ah)) + pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE; if (AR_SREV_9300_20_OR_LATER(ah) && ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index ae3034374bc4..ed7d4fcbdcab 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -246,9 +246,7 @@ enum ath9k_hw_caps { ATH9K_HW_CAP_MCI = BIT(15), ATH9K_HW_CAP_DFS = BIT(16), ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17), - ATH9K_HW_WOW_PATTERN_MATCH_EXACT = BIT(18), - ATH9K_HW_WOW_PATTERN_MATCH_DWORD = BIT(19), - ATH9K_HW_CAP_PAPRD = BIT(20), + ATH9K_HW_CAP_PAPRD = BIT(18), }; /* @@ -291,7 +289,6 @@ struct ath9k_ops_config { u32 ofdm_trig_high; u32 cck_trig_high; u32 cck_trig_low; - u32 enable_ani; u32 enable_paprd; int serialize_regmode; bool rx_intr_mitigation; @@ -310,6 +307,10 @@ struct ath9k_ops_config { u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; u8 max_txtrig_level; u16 ani_poll_interval; /* ANI poll interval in ms */ + + /* Platform specific config */ + u32 xlna_gpio; + bool xatten_margin_cfg; }; enum ath9k_int { @@ -423,7 +424,6 @@ struct ath9k_hw_cal_data { struct ath9k_channel { struct ieee80211_channel *chan; - struct ar5416AniState ani; u16 channel; u32 channelFlags; u32 chanmode; @@ -854,10 +854,10 @@ struct ath_hw { u32 globaltxtimeout; /* ANI */ - u32 proc_phyerr; u32 aniperiod; enum ath9k_ani_cmd ani_function; u32 ani_skip_count; + struct ar5416AniState ani; #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT struct ath_btcoex_hw btcoex_hw; @@ -882,9 +882,6 @@ struct ath_hw { struct ar5416IniArray iniBank6; struct ar5416IniArray iniAddac; struct ar5416IniArray iniPcieSerdes; -#ifdef CONFIG_PM_SLEEP - struct ar5416IniArray iniPcieSerdesWow; -#endif struct ar5416IniArray iniPcieSerdesLowPower; struct ar5416IniArray iniModesFastClock; struct ar5416IniArray iniAdditional; @@ -1165,8 +1162,6 @@ static inline void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) } #endif - - #define ATH9K_CLOCK_RATE_CCK 22 #define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 #define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 2ba494567777..7c2ed1cef9b7 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -21,6 +21,7 @@ #include <linux/ath9k_platform.h> #include <linux/module.h> #include <linux/relay.h> +#include <net/ieee80211_radiotap.h> #include "ath9k.h" @@ -431,6 +432,8 @@ static int ath9k_init_queues(struct ath_softc *sc) sc->config.cabqReadytime = ATH_CABQ_READY_TIME; ath_cabq_update(sc); + sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0); + for (i = 0; i < IEEE80211_NUM_ACS; i++) { sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i); sc->tx.txq_map[i]->mac80211_qnum = i; @@ -510,6 +513,22 @@ static void ath9k_init_misc(struct ath_softc *sc) sc->spec_config.fft_period = 0xF; } +static void ath9k_init_platform(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common 
*common = ath9k_hw_common(ah); + + if (common->bus_ops->ath_bus_type != ATH_PCI) + return; + + if (sc->driver_data & ATH9K_PCI_CUS198) { + ah->config.xlna_gpio = 9; + ah->config.xatten_margin_cfg = true; + + ath_info(common, "Set parameters for CUS198\n"); + } +} + static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, void *ctx) { @@ -602,6 +621,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, common->disable_ani = false; /* + * Platform quirks. + */ + ath9k_init_platform(sc); + + /* * Enable Antenna diversity only when BTCOEX is disabled * and the user manually requests the feature. */ @@ -613,9 +637,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, spin_lock_init(&sc->sc_serial_rw); spin_lock_init(&sc->sc_pm_lock); mutex_init(&sc->mutex); -#ifdef CONFIG_ATH9K_MAC_DEBUG - spin_lock_init(&sc->debug.samp_lock); -#endif tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet, (unsigned long)sc); @@ -755,6 +776,15 @@ static const struct ieee80211_iface_combination if_comb[] = { } }; +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support ath9k_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, + .n_patterns = MAX_NUM_USER_PATTERN, + .pattern_min_len = 1, + .pattern_max_len = MAX_PATTERN_SIZE, +}; +#endif + void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) { struct ath_hw *ah = sc->sc_ah; @@ -769,12 +799,19 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_RC_TABLE; - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) - hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { + hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + + if (AR_SREV_9280_20_OR_LATER(ah)) + hw->radiotap_mcs_details |= + IEEE80211_RADIOTAP_MCS_HAVE_STBC; + } if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt) hw->flags |= IEEE80211_HW_MFP_CAPABLE; + hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT) | @@ -794,21 +831,12 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; #ifdef CONFIG_PM_SLEEP - if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && - device_can_wakeup(sc->dev)) { - - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT; - hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN; - hw->wiphy->wowlan.pattern_min_len = 1; - hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE; - - } + device_can_wakeup(sc->dev)) + hw->wiphy->wowlan = &ath9k_wowlan_support; atomic_set(&sc->wow_sleep_proc_intr, -1); atomic_set(&sc->wow_got_bmiss_intr, -1); - #endif hw->queues = 4; diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 849259b07370..fff5d3ccc663 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -390,9 +390,7 @@ void ath_ani_calibrate(unsigned long data) } /* Verify whether we must check ANI */ - if (sc->sc_ah->config.enable_ani - && (timestamp - common->ani.checkani_timer) >= - ah->config.ani_poll_interval) { + if ((timestamp - common->ani.checkani_timer) >= ah->config.ani_poll_interval) { aniflag = true; common->ani.checkani_timer = timestamp; } @@ -418,7 +416,6 @@ void ath_ani_calibrate(unsigned long data) longcal ? "long" : "", shortcal ? 
"short" : "", aniflag ? "ani" : "", common->ani.caldone ? "true" : "false"); - ath9k_debug_samp_bb_mac(sc); ath9k_ps_restore(sc); set_timer: @@ -428,9 +425,7 @@ set_timer: * short calibration and long calibration. */ cal_interval = ATH_LONG_CALINTERVAL; - if (sc->sc_ah->config.enable_ani) - cal_interval = min(cal_interval, - (u32)ah->config.ani_poll_interval); + cal_interval = min(cal_interval, (u32)ah->config.ani_poll_interval); if (!common->ani.caldone) cal_interval = min(cal_interval, (u32)short_cal_interval); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 566109a40fb3..2ef05ebffbcf 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -547,6 +547,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_status = 0; rs->rs_flags = 0; + rs->flag = 0; rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen; rs->rs_tstamp = ads.AR_RcvTimestamp; @@ -586,10 +587,17 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); - rs->rs_flags = - (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0; - rs->rs_flags |= - (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0; + + /* directly mapped flags for ieee80211_rx_status */ + rs->flag |= + (ads.ds_rxstatus3 & AR_GI) ? RX_FLAG_SHORT_GI : 0; + rs->flag |= + (ads.ds_rxstatus3 & AR_2040) ? RX_FLAG_40MHZ : 0; + if (AR_SREV_9280_20_OR_LATER(ah)) + rs->flag |= + (ads.ds_rxstatus3 & AR_STBC) ? + /* we can only Nss=1 STBC */ + (1 << RX_FLAG_STBC_SHIFT) : 0; if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE; diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 5865f92998e1..b02dfce964b4 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -149,6 +149,7 @@ struct ath_rx_status { u32 evm2; u32 evm3; u32 evm4; + u32 flag; /* see enum mac80211_rx_flags */ }; struct ath_htc_rx_status { @@ -533,7 +534,8 @@ struct ar5416_desc { #define AR_2040 0x00000002 #define AR_Parallel40 0x00000004 #define AR_Parallel40_S 2 -#define AR_RxStatusRsvd30 0x000000f8 +#define AR_STBC 0x00000008 /* on ar9280 and later */ +#define AR_RxStatusRsvd30 0x000000f0 #define AR_RxAntenna 0xffffff00 #define AR_RxAntenna_S 8 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 5092ecae7706..1737a3e33685 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -193,7 +193,6 @@ static bool ath_prepare_reset(struct ath_softc *sc) ath_stop_ani(sc); del_timer_sync(&sc->rx_poll_timer); - ath9k_debug_samp_bb_mac(sc); ath9k_hw_disable_interrupts(ah); if (!ath_drain_all_txq(sc)) @@ -1211,13 +1210,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) ath_update_survey_stats(sc); spin_unlock_irqrestore(&common->cc_lock, flags); - /* - * Preserve the current channel values, before updating - * the same channel - */ - if (ah->curchan && (old_pos == pos)) - ath9k_hw_getnf(ah, ah->curchan); - ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos], curchan, channel_type); @@ -1273,7 +1265,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) curchan->center_freq); } else { /* perform spectral scan if requested. 
*/ - if (sc->scanning && + if (test_bit(SC_OP_SCANNING, &sc->sc_flags) && sc->spectral_mode == SPECTRAL_CHANSCAN) ath9k_spectral_scan_trigger(hw); } @@ -1690,7 +1682,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, bool flush = false; int ret = 0; - local_bh_disable(); + mutex_lock(&sc->mutex); switch (action) { case IEEE80211_AMPDU_RX_START: @@ -1723,7 +1715,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n"); } - local_bh_enable(); + mutex_unlock(&sc->mutex); return ret; } @@ -2007,7 +1999,6 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_capabilities *pcaps = &ah->caps; int pattern_count = 0; int i, byte_cnt; u8 dis_deauth_pattern[MAX_PATTERN_SIZE]; @@ -2077,36 +2068,9 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc) /* Create Disassociate pattern mask */ - if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_EXACT) { - - if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_DWORD) { - /* - * for AR9280, because of hardware limitation, the - * first 4 bytes have to be matched for all patterns. - * the mask for disassociation and de-auth pattern - * matching need to enable the first 4 bytes. - * also the duration field needs to be filled. - */ - dis_deauth_mask[0] = 0xf0; - - /* - * fill in duration field - FIXME: what is the exact value ? - */ - dis_deauth_pattern[2] = 0xff; - dis_deauth_pattern[3] = 0xff; - } else { - dis_deauth_mask[0] = 0xfe; - } - - dis_deauth_mask[1] = 0x03; - dis_deauth_mask[2] = 0xc0; - } else { - dis_deauth_mask[0] = 0xef; - dis_deauth_mask[1] = 0x3f; - dis_deauth_mask[2] = 0x00; - dis_deauth_mask[3] = 0xfc; - } + dis_deauth_mask[0] = 0xfe; + dis_deauth_mask[1] = 0x03; + dis_deauth_mask[2] = 0xc0; ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n"); @@ -2342,15 +2306,13 @@ static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled) static void ath9k_sw_scan_start(struct ieee80211_hw *hw) { struct ath_softc *sc = hw->priv; - - sc->scanning = 1; + set_bit(SC_OP_SCANNING, &sc->sc_flags); } static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) { struct ath_softc *sc = hw->priv; - - sc->scanning = 0; + clear_bit(SC_OP_SCANNING, &sc->sc_flags); } struct ieee80211_ops ath9k_ops = { @@ -2378,6 +2340,7 @@ struct ieee80211_ops ath9k_ops = { .flush = ath9k_flush, .tx_frames_pending = ath9k_tx_frames_pending, .tx_last_beacon = ath9k_tx_last_beacon, + .release_buffered_frames = ath9k_release_buffered_frames, .get_stats = ath9k_get_stats, .set_antenna = ath9k_set_antenna, .get_antenna = ath9k_get_antenna, diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 0e0d39583837..4ac00b4b0d25 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -34,6 +34,34 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */ + + /* PCI-E CUS198 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x2086), + .driver_data = ATH9K_PCI_CUS198 }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1237), + .driver_data = ATH9K_PCI_CUS198 }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x2126), + .driver_data = ATH9K_PCI_CUS198 }, + { 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x2152), + .driver_data = ATH9K_PCI_CUS198 }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_FOXCONN, + 0xE075), + .driver_data = ATH9K_PCI_CUS198 }, + { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ @@ -221,6 +249,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) sc->hw = hw; sc->dev = &pdev->dev; sc->mem = pcim_iomap_table(pdev)[0]; + sc->driver_data = id->driver_data; /* Will be cleared in ath9k_start() */ set_bit(SC_OP_INVALID, &sc->sc_flags); diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 8be2b5d8c155..865e043e8aa6 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -868,10 +868,7 @@ static int ath9k_process_rate(struct ath_common *common, if (rx_stats->rs_rate & 0x80) { /* HT rate */ rxs->flag |= RX_FLAG_HT; - if (rx_stats->rs_flags & ATH9K_RX_2040) - rxs->flag |= RX_FLAG_40MHZ; - if (rx_stats->rs_flags & ATH9K_RX_GI) - rxs->flag |= RX_FLAG_SHORT_GI; + rxs->flag |= rx_stats->flag; rxs->rate_idx = rx_stats->rs_rate & 0x7f; return 0; } @@ -958,11 +955,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, if (rx_stats->rs_more) return 0; - ath9k_process_rssi(common, hw, hdr, rx_stats); - if (ath9k_process_rate(common, hw, rx_stats, rx_status)) return -EINVAL; + ath9k_process_rssi(common, hw, hdr, rx_stats); + rx_status->band = hw->conf.chandef.chan->band; rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->signal = ah->noise + rx_stats->rs_rssi; diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c index 9f8563091bea..81c88dd606dc 100644 --- a/drivers/net/wireless/ath/ath9k/wow.c +++ b/drivers/net/wireless/ath/ath9k/wow.c @@ -34,17 +34,6 @@ const char *ath9k_hw_wow_event_to_string(u32 wow_event) } EXPORT_SYMBOL(ath9k_hw_wow_event_to_string); -static void ath9k_hw_config_serdes_wow_sleep(struct ath_hw *ah) -{ - int i; - - for (i = 0; i < ah->iniPcieSerdesWow.ia_rows; i++) - REG_WRITE(ah, INI_RA(&ah->iniPcieSerdesWow, i, 0), - INI_RA(&ah->iniPcieSerdesWow, i, 1)); - - usleep_range(1000, 1500); -} - static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); @@ -58,15 +47,8 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW)); return; - } else { - if (!AR_SREV_9300_20_OR_LATER(ah)) - REG_WRITE(ah, AR_RXDP, 0x0); } - /* AR9280 WoW has sleep issue, do not set it to sleep */ - if (AR_SREV_9280_20(ah)) - return; - REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT); } @@ -84,27 +66,16 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah) /* set the transmit buffer */ ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16)); - - if (!(AR_SREV_9300_20_OR_LATER(ah))) - ctl[0] += (KAL_ANTENNA_MODE << 25); - ctl[1] = 0; ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */ ctl[4] = 0; ctl[7] = (ah->txchainmask) << 2; - - if (AR_SREV_9300_20_OR_LATER(ah)) - ctl[2] = 0xf << 16; /* tx_tries 0 */ - else - ctl[2] = 0x7 << 16; /* tx_tries 0 */ - + ctl[2] = 0xf << 16; /* tx_tries 0 */ for (i = 0; i < KAL_NUM_DESC_WORDS; i++) REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); - /* for AR9300 family 13 
descriptor words */ - if (AR_SREV_9300_20_OR_LATER(ah)) - REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); + REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) | (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16); @@ -183,9 +154,6 @@ void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern, ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT); - if (!AR_SREV_9285_12_OR_LATER(ah)) - return; - if (pattern_count < 4) { /* Pattern 0-3 uses AR_WOW_LENGTH1 register */ set = (pattern_len & AR_WOW_LENGTH_MAX) << @@ -207,6 +175,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) { u32 wow_status = 0; u32 val = 0, rval; + /* * read the WoW status register to know * the wakeup reason @@ -223,19 +192,14 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) val &= ah->wow_event_mask; if (val) { - if (val & AR_WOW_MAGIC_PAT_FOUND) wow_status |= AH_WOW_MAGIC_PATTERN_EN; - if (AR_WOW_PATTERN_FOUND(val)) wow_status |= AH_WOW_USER_PATTERN_EN; - if (val & AR_WOW_KEEP_ALIVE_FAIL) wow_status |= AH_WOW_LINK_CHANGE; - if (val & AR_WOW_BEACON_FAIL) wow_status |= AH_WOW_BEACON_MISS; - } /* @@ -255,17 +219,6 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN))); /* - * tie reset register for AR9002 family of chipsets - * NB: not tieing it back might have some repurcussions. - */ - - if (!AR_SREV_9300_20_OR_LATER(ah)) { - REG_SET_BIT(ah, AR_WA, AR_WA_UNTIE_RESET_EN | - AR_WA_POR_SHORT | AR_WA_RESET_EN); - } - - - /* * restore the beacon threshold to init value */ REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); @@ -277,8 +230,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) * reset to our Chip's Power On Reset so that any PCI-E * reset from the bus will not reset our chip */ - - if (AR_SREV_9280_20_OR_LATER(ah) && ah->is_pciexpress) + if (ah->is_pciexpress) ath9k_hw_configpcipowersave(ah, false); ah->wow_event_mask = 0; @@ -298,7 +250,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) * are from the 'pattern_enable' in this function and * 'pattern_count' of ath9k_hw_wow_apply_pattern() */ - wow_event_mask = ah->wow_event_mask; /* @@ -306,50 +257,15 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) * WOW sleep, we do want the Reset from the PCI-E to disturb * our hw state */ - if (ah->is_pciexpress) { - /* * we need to untie the internal POR (power-on-reset) * to the external PCI-E reset. We also need to tie * the PCI-E Phy reset to the PCI-E reset. */ - - if (AR_SREV_9300_20_OR_LATER(ah)) { - set = AR_WA_RESET_EN | AR_WA_POR_SHORT; - clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE; - REG_RMW(ah, AR_WA, set, clr); - } else { - if (AR_SREV_9285(ah) || AR_SREV_9287(ah)) - set = AR9285_WA_DEFAULT; - else - set = AR9280_WA_DEFAULT; - - /* - * In AR9280 and AR9285, bit 14 in WA register - * (disable L1) should only be set when device - * enters D3 state and be cleared when device - * comes back to D0 - */ - - if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE) - set |= AR_WA_D3_L1_DISABLE; - - clr = AR_WA_UNTIE_RESET_EN; - set |= AR_WA_RESET_EN | AR_WA_POR_SHORT; - REG_RMW(ah, AR_WA, set, clr); - - /* - * for WoW sleep, we reprogram the SerDes so that the - * PLL and CLK REQ are both enabled. This uses more - * power but otherwise WoW sleep is unstable and the - * chip may disappear. 
- */ - - if (AR_SREV_9285_12_OR_LATER(ah)) - ath9k_hw_config_serdes_wow_sleep(ah); - - } + set = AR_WA_RESET_EN | AR_WA_POR_SHORT; + clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE; + REG_RMW(ah, AR_WA, set, clr); } /* @@ -378,7 +294,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) * Program default values for pattern backoff, aifs/slot/KAL count, * beacon miss timeout, KAL timeout, etc. */ - set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF); REG_SET_BIT(ah, AR_WOW_PATTERN, set); @@ -398,7 +313,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) /* * Keep alive timo in ms except AR9280 */ - if (!pattern_enable || AR_SREV_9280(ah)) + if (!pattern_enable) set = AR_WOW_KEEP_ALIVE_NEVER; else set = KAL_TIMEOUT * 32; @@ -420,7 +335,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) /* * Configure MAC WoW Registers */ - set = 0; /* Send keep alive timeouts anyway */ clr = AR_WOW_KEEP_ALIVE_AUTO_DIS; @@ -430,16 +344,9 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) else set = AR_WOW_KEEP_ALIVE_FAIL_DIS; - /* - * FIXME: For now disable keep alive frame - * failure. This seems to sometimes trigger - * unnecessary wake up with AR9485 chipsets. - */ set = AR_WOW_KEEP_ALIVE_FAIL_DIS; - REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr); - /* * we are relying on a bmiss failure. ensure we have * enough threshold to prevent false positives @@ -473,14 +380,8 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) set |= AR_WOW_MAC_INTR_EN; REG_RMW(ah, AR_WOW_PATTERN, set, clr); - /* - * For AR9285 and later version of chipsets - * enable WoW pattern match for packets less - * than 256 bytes for all patterns - */ - if (AR_SREV_9285_12_OR_LATER(ah)) - REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B, - AR_WOW_PATTERN_SUPPORTED); + REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B, + AR_WOW_PATTERN_SUPPORTED); /* * Set the power states appropriately and enable PME @@ -488,43 +389,32 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) clr = 0; set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA; - /* - * This is needed for AR9300 chipsets to wake-up - * the host. - */ - if (AR_SREV_9300_20_OR_LATER(ah)) - clr = AR_PCIE_PM_CTRL_ENA; + clr = AR_PCIE_PM_CTRL_ENA; REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr); - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { - /* - * this is needed to prevent the chip waking up - * the host within 3-4 seconds with certain - * platform/BIOS. The fix is to enable - * D1 & D3 to match original definition and - * also match the OTP value. Anyway this - * is more related to SW WOW. - */ - clr = AR_PMCTRL_PWR_STATE_D1D3; - REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr); - - set = AR_PMCTRL_PWR_STATE_D1D3_REAL; - REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set); - } - + /* + * this is needed to prevent the chip waking up + * the host within 3-4 seconds with certain + * platform/BIOS. The fix is to enable + * D1 & D3 to match original definition and + * also match the OTP value. Anyway this + * is more related to SW WOW. 
+ */ + clr = AR_PMCTRL_PWR_STATE_D1D3; + REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr); + set = AR_PMCTRL_PWR_STATE_D1D3_REAL; + REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set); REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM); - if (AR_SREV_9300_20_OR_LATER(ah)) { - /* to bring down WOW power low margin */ - set = BIT(13); - REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set); - /* HW WoW */ - clr = BIT(5); - REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr); - } + /* to bring down WOW power low margin */ + set = BIT(13); + REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set); + /* HW WoW */ + clr = BIT(5); + REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr); ath9k_hw_set_powermode_wow_sleep(ah); ah->wow_event_mask = wow_event_mask; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 1c9b1bac8b0d..7e19d9b5214e 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -518,6 +518,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, !txfail); } else { + if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) { + tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP; + ieee80211_sta_eosp(sta); + } /* retry the un-acked ones */ if (bf->bf_next == NULL && bf_last->bf_stale) { struct ath_buf *tbf; @@ -786,25 +790,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, return ndelim; } -static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, - struct ath_txq *txq, - struct ath_atx_tid *tid, - struct list_head *bf_q, - int *aggr_len) +static struct ath_buf * +ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid) { -#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) - struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; - int rl = 0, nframes = 0, ndelim, prev_al = 0; - u16 aggr_limit = 0, al = 0, bpad = 0, - al_delta, h_baw = tid->baw_size / 2; - enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; - struct ieee80211_tx_info *tx_info; struct ath_frame_info *fi; struct sk_buff *skb; + struct ath_buf *bf; u16 seqno; - do { + while (1) { skb = skb_peek(&tid->buf_q); + if (!skb) + break; + fi = get_frame_info(skb); bf = fi->bf; if (!fi->bf) @@ -820,10 +819,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, seqno = bf->bf_state.seqno; /* do not step over block-ack window */ - if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { - status = ATH_AGGR_BAW_CLOSED; + if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) break; - } if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) { struct ath_tx_status ts = {}; @@ -837,6 +834,40 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, continue; } + bf->bf_next = NULL; + bf->bf_lastbf = bf; + return bf; + } + + return NULL; +} + +static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, + struct ath_txq *txq, + struct ath_atx_tid *tid, + struct list_head *bf_q, + int *aggr_len) +{ +#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) + struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; + int rl = 0, nframes = 0, ndelim, prev_al = 0; + u16 aggr_limit = 0, al = 0, bpad = 0, + al_delta, h_baw = tid->baw_size / 2; + enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; + struct ieee80211_tx_info *tx_info; + struct ath_frame_info *fi; + struct sk_buff *skb; + + do { + bf = ath_tx_get_tid_subframe(sc, txq, tid); + if (!bf) { + status = ATH_AGGR_BAW_CLOSED; + break; + } + + skb = bf->bf_mpdu; + fi = get_frame_info(skb); + if (!bf_first) bf_first = bf; @@ -882,7 
+913,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, /* link buffers of this frame to the aggregate */ if (!fi->retries) - ath_tx_addto_baw(sc, tid, seqno); + ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); bf->bf_state.ndelim = ndelim; __skb_unlink(skb, &tid->buf_q); @@ -1090,10 +1121,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, struct ath_txq *txq, int len) { struct ath_hw *ah = sc->sc_ah; - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); - struct ath_buf *bf_first = bf; + struct ath_buf *bf_first = NULL; struct ath_tx_info info; - bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR); memset(&info, 0, sizeof(info)); info.is_first = true; @@ -1101,24 +1130,11 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, info.txpower = MAX_RATE_POWER; info.qcu = txq->axq_qnum; - info.flags = ATH9K_TXDESC_INTREQ; - if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) - info.flags |= ATH9K_TXDESC_NOACK; - if (tx_info->flags & IEEE80211_TX_CTL_LDPC) - info.flags |= ATH9K_TXDESC_LDPC; - - ath_buf_set_rate(sc, bf, &info, len); - - if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) - info.flags |= ATH9K_TXDESC_CLRDMASK; - - if (bf->bf_state.bfs_paprd) - info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S; - - while (bf) { struct sk_buff *skb = bf->bf_mpdu; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath_frame_info *fi = get_frame_info(skb); + bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR); info.type = get_hw_packet_type(skb); if (bf->bf_next) @@ -1126,6 +1142,26 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, else info.link = 0; + if (!bf_first) { + bf_first = bf; + + info.flags = ATH9K_TXDESC_INTREQ; + if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) || + txq == sc->tx.uapsdq) + info.flags |= ATH9K_TXDESC_CLRDMASK; + + if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) + info.flags |= ATH9K_TXDESC_NOACK; + if (tx_info->flags & IEEE80211_TX_CTL_LDPC) + info.flags |= ATH9K_TXDESC_LDPC; + + if (bf->bf_state.bfs_paprd) + info.flags |= (u32) bf->bf_state.bfs_paprd << + ATH9K_TXDESC_PAPRD_S; + + ath_buf_set_rate(sc, bf, &info, len); + } + info.buf_addr[0] = bf->bf_buf_addr; info.buf_len[0] = skb->len; info.pkt_len = fi->framelen; @@ -1135,7 +1171,7 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, if (aggr) { if (bf == bf_first) info.aggr = AGGR_BUF_FIRST; - else if (!bf->bf_next) + else if (bf == bf_first->bf_lastbf) info.aggr = AGGR_BUF_LAST; else info.aggr = AGGR_BUF_MIDDLE; @@ -1144,6 +1180,9 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, info.aggr_len = len; } + if (bf == bf_first->bf_lastbf) + bf_first = NULL; + ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); bf = bf->bf_next; } @@ -1328,6 +1367,70 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, ath_txq_unlock_complete(sc, txq); } +void ath9k_release_buffered_frames(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u16 tids, int nframes, + enum ieee80211_frame_release_type reason, + bool more_data) +{ + struct ath_softc *sc = hw->priv; + struct ath_node *an = (struct ath_node *)sta->drv_priv; + struct ath_txq *txq = sc->tx.uapsdq; + struct ieee80211_tx_info *info; + struct list_head bf_q; + struct ath_buf *bf_tail = NULL, *bf; + int sent = 0; + int i; + + INIT_LIST_HEAD(&bf_q); + for (i = 0; tids && nframes; i++, tids >>= 1) { + struct ath_atx_tid *tid; + + if (!(tids & 1)) + continue; + + tid = ATH_AN_2_TID(an, i); + if 
(tid->paused) + continue; + + ath_txq_lock(sc, tid->ac->txq); + while (!skb_queue_empty(&tid->buf_q) && nframes > 0) { + bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid); + if (!bf) + break; + + __skb_unlink(bf->bf_mpdu, &tid->buf_q); + list_add_tail(&bf->list, &bf_q); + ath_set_rates(tid->an->vif, tid->an->sta, bf); + ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); + bf->bf_state.bf_type &= ~BUF_AGGR; + if (bf_tail) + bf_tail->bf_next = bf; + + bf_tail = bf; + nframes--; + sent++; + TX_STAT_INC(txq->axq_qnum, a_queued_hw); + + if (skb_queue_empty(&tid->buf_q)) + ieee80211_sta_set_buffered(an->sta, i, false); + } + ath_txq_unlock_complete(sc, tid->ac->txq); + } + + if (list_empty(&bf_q)) + return; + + info = IEEE80211_SKB_CB(bf_tail->bf_mpdu); + info->flags |= IEEE80211_TX_STATUS_EOSP; + + bf = list_first_entry(&bf_q, struct ath_buf, list); + ath_txq_lock(sc, txq); + ath_tx_fill_desc(sc, bf, txq, 0); + ath_tx_txqaddbuf(sc, txq, &bf_q, false); + ath_txq_unlock(sc, txq); +} + /********************/ /* Queue Management */ /********************/ @@ -1681,8 +1784,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, } } -static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, - struct sk_buff *skb, struct ath_tx_control *txctl) +static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid, struct sk_buff *skb, + struct ath_tx_control *txctl) { struct ath_frame_info *fi = get_frame_info(skb); struct list_head bf_head; @@ -1695,21 +1799,22 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, * - seqno is not within block-ack window * - h/w queue depth exceeds low water mark */ - if (!skb_queue_empty(&tid->buf_q) || tid->paused || - !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || - txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) { + if ((!skb_queue_empty(&tid->buf_q) || tid->paused || + !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || + txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) && + txq != sc->tx.uapsdq) { /* * Add this frame to software queue for scheduling later * for aggregation. 
*/ - TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw); + TX_STAT_INC(txq->axq_qnum, a_queued_sw); __skb_queue_tail(&tid->buf_q, skb); if (!txctl->an || !txctl->an->sleeping) - ath_tx_queue_tid(txctl->txq, tid); + ath_tx_queue_tid(txq, tid); return; } - bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); + bf = ath_tx_setup_buffer(sc, txq, tid, skb); if (!bf) { ieee80211_free_txskb(sc->hw, skb); return; @@ -1724,10 +1829,10 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); /* Queue to h/w without aggregation */ - TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw); + TX_STAT_INC(txq->axq_qnum, a_queued_hw); bf->bf_lastbf = bf; - ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen); - ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false); + ath_tx_fill_desc(sc, bf, txq, fi->framelen); + ath_tx_txqaddbuf(sc, txq, &bf_head, false); } static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, @@ -1865,22 +1970,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, return bf; } -/* Upon failure caller should free skb */ -int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ath_tx_control *txctl) +static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ath_tx_control *txctl) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = txctl->sta; struct ieee80211_vif *vif = info->control.vif; struct ath_softc *sc = hw->priv; - struct ath_txq *txq = txctl->txq; - struct ath_atx_tid *tid = NULL; - struct ath_buf *bf; - int padpos, padsize; int frmlen = skb->len + FCS_LEN; - u8 tidno; - int q; + int padpos, padsize; /* NOTE: sta can be NULL according to net/mac80211.h */ if (sta) @@ -1901,6 +2000,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); } + if ((vif && vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_AP_VLAN) || + !ieee80211_is_data(hdr->frame_control)) + info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; + /* Add the padding after the header if this is not already done */ padpos = ieee80211_hdrlen(hdr->frame_control); padsize = padpos & 3; @@ -1910,16 +2014,34 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, skb_push(skb, padsize); memmove(skb->data, skb->data + padsize, padpos); - hdr = (struct ieee80211_hdr *) skb->data; } - if ((vif && vif->type != NL80211_IFTYPE_AP && - vif->type != NL80211_IFTYPE_AP_VLAN) || - !ieee80211_is_data(hdr->frame_control)) - info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; - setup_frame_info(hw, sta, skb, frmlen); + return 0; +} + +/* Upon failure caller should free skb */ +int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ath_tx_control *txctl) +{ + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *sta = txctl->sta; + struct ieee80211_vif *vif = info->control.vif; + struct ath_softc *sc = hw->priv; + struct ath_txq *txq = txctl->txq; + struct ath_atx_tid *tid = NULL; + struct ath_buf *bf; + u8 tidno; + int q; + int ret; + + ret = ath_tx_prepare(hw, skb, txctl); + if (ret) + return ret; + + hdr = (struct ieee80211_hdr *) skb->data; /* * At this point, the vif, hw_key and sta pointers in the tx control * info are no longer valid (overwritten by the ath_frame_info data. 
@@ -1935,6 +2057,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, txq->stopped = true; } + if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { + ath_txq_unlock(sc, txq); + txq = sc->tx.uapsdq; + ath_txq_lock(sc, txq); + } + if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; @@ -1948,11 +2076,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, * Try aggregation if it's a unicast data frame * and the destination is HT capable. */ - ath_tx_send_ampdu(sc, tid, skb, txctl); + ath_tx_send_ampdu(sc, txq, tid, skb, txctl); goto out; } - bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); + bf = ath_tx_setup_buffer(sc, txq, tid, skb); if (!bf) { if (txctl->paprd) dev_kfree_skb_any(skb); @@ -1967,7 +2095,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, bf->bf_state.bfs_paprd_timestamp = jiffies; ath_set_rates(vif, sta, bf); - ath_tx_send_normal(sc, txctl->txq, tid, skb); + ath_tx_send_normal(sc, txq, tid, skb); out: ath_txq_unlock(sc, txq); @@ -1975,6 +2103,74 @@ out: return 0; } +void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct sk_buff *skb) +{ + struct ath_softc *sc = hw->priv; + struct ath_tx_control txctl = { + .txq = sc->beacon.cabq + }; + struct ath_tx_info info = {}; + struct ieee80211_hdr *hdr; + struct ath_buf *bf_tail = NULL; + struct ath_buf *bf; + LIST_HEAD(bf_q); + int duration = 0; + int max_duration; + + max_duration = + sc->cur_beacon_conf.beacon_interval * 1000 * + sc->cur_beacon_conf.dtim_period / ATH_BCBUF; + + do { + struct ath_frame_info *fi = get_frame_info(skb); + + if (ath_tx_prepare(hw, skb, &txctl)) + break; + + bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb); + if (!bf) + break; + + bf->bf_lastbf = bf; + ath_set_rates(vif, NULL, bf); + ath_buf_set_rate(sc, bf, &info, fi->framelen); + duration += info.rates[0].PktDuration; + if (bf_tail) + bf_tail->bf_next = bf; + + list_add_tail(&bf->list, &bf_q); + bf_tail = bf; + skb = NULL; + + if (duration > max_duration) + break; + + skb = ieee80211_get_buffered_bc(hw, vif); + } while(skb); + + if (skb) + ieee80211_free_txskb(hw, skb); + + if (list_empty(&bf_q)) + return; + + bf = list_first_entry(&bf_q, struct ath_buf, list); + hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; + + if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) { + hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA; + dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, + sizeof(*hdr), DMA_TO_DEVICE); + } + + ath_txq_lock(sc, txctl.txq); + ath_tx_fill_desc(sc, bf, txctl.txq, 0); + ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false); + TX_STAT_INC(txctl.txq->axq_qnum, queued); + ath_txq_unlock(sc, txctl.txq); +} + /*****************/ /* TX Completion */ /*****************/ @@ -2020,7 +2216,12 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, } spin_unlock_irqrestore(&sc->sc_pm_lock, flags); + __skb_queue_tail(&txq->complete_q, skb); + q = skb_get_queue_mapping(skb); + if (txq == sc->tx.uapsdq) + txq = sc->tx.txq_map[q]; + if (txq == sc->tx.txq_map[q]) { if (WARN_ON(--txq->pending_frames < 0)) txq->pending_frames = 0; @@ -2031,8 +2232,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, txq->stopped = false; } } - - __skb_queue_tail(&txq->complete_q, skb); } static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index 9dce106cd6d4..8596aba34f96 
100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@ -133,6 +133,9 @@ struct carl9170_sta_tid { /* Preaggregation reorder queue */ struct sk_buff_head queue; + + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; }; #define CARL9170_QUEUE_TIMEOUT 256 diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index e9010a481dfd..4a33c6e39ca2 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1448,6 +1448,8 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, tid_info->state = CARL9170_TID_STATE_PROGRESS; tid_info->tid = tid; tid_info->max = sta_info->ampdu_max_len; + tid_info->sta = sta; + tid_info->vif = vif; INIT_LIST_HEAD(&tid_info->list); INIT_LIST_HEAD(&tid_info->tmp_list); @@ -1857,6 +1859,7 @@ void *carl9170_alloc(size_t priv_size) IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | + IEEE80211_HW_SUPPORTS_RC_TABLE | IEEE80211_HW_SIGNAL_DBM; if (!modparam_noht) { diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index c61cafa2665b..e3f696ee4d23 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -625,7 +625,7 @@ static void carl9170_tx_ampdu_timeout(struct ar9170 *ar) msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT))) goto unlock; - sta = __carl9170_get_tx_sta(ar, skb); + sta = iter->sta; if (WARN_ON(!sta)) goto unlock; @@ -866,6 +866,93 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar, return false; } +static void carl9170_tx_get_rates(struct ar9170 *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info; + + BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES); + BUILD_BUG_ON(IEEE80211_TX_MAX_RATES > IEEE80211_TX_RATE_TABLE_SIZE); + + info = IEEE80211_SKB_CB(skb); + + ieee80211_get_tx_rates(vif, sta, skb, + info->control.rates, + IEEE80211_TX_MAX_RATES); +} + +static void carl9170_tx_apply_rateset(struct ar9170 *ar, + struct ieee80211_tx_info *sinfo, + struct sk_buff *skb) +{ + struct ieee80211_tx_rate *txrate; + struct ieee80211_tx_info *info; + struct _carl9170_tx_superframe *txc = (void *) skb->data; + int i; + bool ampdu; + bool no_ack; + + info = IEEE80211_SKB_CB(skb); + ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU); + no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK); + + /* Set the rate control probe flag for all (sub-) frames. + * This is because the TX_STATS_AMPDU flag is only set on + * the last frame, so it has to be inherited. + */ + info->flags |= (sinfo->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); + + /* NOTE: For the first rate, the ERP & AMPDU flags are directly + * taken from mac_control. For all fallback rate, the firmware + * updates the mac_control flags from the rate info field. 
+ */ + for (i = 0; i < CARL9170_TX_MAX_RATES; i++) { + __le32 phy_set; + + txrate = &sinfo->control.rates[i]; + if (txrate->idx < 0) + break; + + phy_set = carl9170_tx_physet(ar, info, txrate); + if (i == 0) { + __le16 mac_tmp = cpu_to_le16(0); + + /* first rate - part of the hw's frame header */ + txc->f.phy_control = phy_set; + + if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS) + mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR); + + if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) + mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS); + else if (carl9170_tx_cts_check(ar, txrate)) + mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS); + + txc->f.mac_control |= mac_tmp; + } else { + /* fallback rates are stored in the firmware's + * retry rate set array. + */ + txc->s.rr[i - 1] = phy_set; + } + + SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i], + txrate->count); + + if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) + txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS << + CARL9170_TX_SUPER_RI_ERP_PROT_S); + else if (carl9170_tx_cts_check(ar, txrate)) + txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS << + CARL9170_TX_SUPER_RI_ERP_PROT_S); + + if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS)) + txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU; + } +} + static int carl9170_tx_prepare(struct ar9170 *ar, struct ieee80211_sta *sta, struct sk_buff *skb) @@ -874,13 +961,10 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct _carl9170_tx_superframe *txc; struct carl9170_vif_info *cvif; struct ieee80211_tx_info *info; - struct ieee80211_tx_rate *txrate; struct carl9170_tx_info *arinfo; unsigned int hw_queue; - int i; __le16 mac_tmp; u16 len; - bool ampdu, no_ack; BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data)); BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) != @@ -889,8 +973,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) != AR9170_TX_HWDESC_LEN); - BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES); - BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC > ((CARL9170_TX_SUPER_MISC_VIF_ID >> CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1)); @@ -932,8 +1014,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar, mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) & AR9170_TX_MAC_QOS); - no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK); - if (unlikely(no_ack)) + if (unlikely(info->flags & IEEE80211_TX_CTL_NO_ACK)) mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK); if (info->control.hw_key) { @@ -954,8 +1035,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar, } } - ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU); - if (ampdu) { + if (info->flags & IEEE80211_TX_CTL_AMPDU) { unsigned int density, factor; if (unlikely(!sta || !cvif)) @@ -982,50 +1062,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, txc->s.ampdu_settings, factor); } - /* - * NOTE: For the first rate, the ERP & AMPDU flags are directly - * taken from mac_control. For all fallback rate, the firmware - * updates the mac_control flags from the rate info field. 
- */ - for (i = 0; i < CARL9170_TX_MAX_RATES; i++) { - __le32 phy_set; - txrate = &info->control.rates[i]; - if (txrate->idx < 0) - break; - - phy_set = carl9170_tx_physet(ar, info, txrate); - if (i == 0) { - /* first rate - part of the hw's frame header */ - txc->f.phy_control = phy_set; - - if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS) - mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR); - if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) - mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS); - else if (carl9170_tx_cts_check(ar, txrate)) - mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS); - - } else { - /* fallback rates are stored in the firmware's - * retry rate set array. - */ - txc->s.rr[i - 1] = phy_set; - } - - SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i], - txrate->count); - - if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) - txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS << - CARL9170_TX_SUPER_RI_ERP_PROT_S); - else if (carl9170_tx_cts_check(ar, txrate)) - txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS << - CARL9170_TX_SUPER_RI_ERP_PROT_S); - - if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS)) - txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU; - } - txc->s.len = cpu_to_le16(skb->len); txc->f.length = cpu_to_le16(len + FCS_LEN); txc->f.mac_control = mac_tmp; @@ -1086,31 +1122,12 @@ static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb) } } -static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest, - struct sk_buff *_src) -{ - struct _carl9170_tx_superframe *dest, *src; - - dest = (void *) _dest->data; - src = (void *) _src->data; - - /* - * The mac80211 rate control algorithm expects that all MPDUs in - * an AMPDU share the same tx vectors. - * This is not really obvious right now, because the hardware - * does the AMPDU setup according to its own rulebook. - * Our nicely assembled, strictly monotonic increasing mpdu - * chains will be broken up, mashed back together... - */ - - return (dest->f.phy_control == src->f.phy_control); -} - static void carl9170_tx_ampdu(struct ar9170 *ar) { struct sk_buff_head agg; struct carl9170_sta_tid *tid_info; struct sk_buff *skb, *first; + struct ieee80211_tx_info *tx_info_first; unsigned int i = 0, done_ampdus = 0; u16 seq, queue, tmpssn; @@ -1156,6 +1173,7 @@ retry: goto processed; } + tx_info_first = NULL; while ((skb = skb_peek(&tid_info->queue))) { /* strict 0, 1, ..., n - 1, n frame sequence order */ if (unlikely(carl9170_get_seq(skb) != seq)) @@ -1166,8 +1184,13 @@ retry: (tid_info->max - 1))) break; - if (!carl9170_tx_rate_check(ar, skb, first)) - break; + if (!tx_info_first) { + carl9170_tx_get_rates(ar, tid_info->vif, + tid_info->sta, first); + tx_info_first = IEEE80211_SKB_CB(first); + } + + carl9170_tx_apply_rateset(ar, tx_info_first, skb); atomic_inc(&ar->tx_ampdu_upload); tid_info->snx = seq = SEQ_NEXT(seq); @@ -1182,8 +1205,7 @@ retry: if (skb_queue_empty(&tid_info->queue) || carl9170_get_seq(skb_peek(&tid_info->queue)) != tid_info->snx) { - /* - * stop TID, if A-MPDU frames are still missing, + /* stop TID, if A-MPDU frames are still missing, * or whenever the queue is empty. 
*/ @@ -1450,12 +1472,14 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct ar9170 *ar = hw->priv; struct ieee80211_tx_info *info; struct ieee80211_sta *sta = control->sta; + struct ieee80211_vif *vif; bool run; if (unlikely(!IS_STARTED(ar))) goto err_free; info = IEEE80211_SKB_CB(skb); + vif = info->control.vif; if (unlikely(carl9170_tx_prepare(ar, sta, skb))) goto err_free; @@ -1486,6 +1510,8 @@ void carl9170_op_tx(struct ieee80211_hw *hw, } else { unsigned int queue = skb_get_queue_mapping(skb); + carl9170_tx_get_rates(ar, vif, sta, skb); + carl9170_tx_apply_rateset(ar, info, skb); skb_queue_tail(&ar->tx_pending[queue], skb); } diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index ccc4c718f124..7d077c752dd5 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -42,11 +42,11 @@ static int __ath_regd_init(struct ath_regulatory *reg); NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM) /* We allow IBSS on these on a case by case basis by regulatory domain */ -#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 40, 0, 30,\ +#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) -#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 40, 0, 30,\ +#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) -#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 40, 0, 30,\ +#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) #define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \ diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index bac3d98a0cfb..ce8c0381825e 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -27,3 +27,15 @@ config WIL6210_ISR_COR self-clear when accessed for debug purposes, it makes such monitoring impossible. Say y unless you debug interrupts + +config WIL6210_TRACING + bool "wil6210 tracing support" + depends on WIL6210 + depends on EVENT_TRACING + default y + ---help--- + Say Y here to enable tracepoints for the wil6210 driver + using the kernel tracing infrastructure. Select this + option if you are interested in debugging the driver. + + If unsure, say Y to make it easier to debug problems. 
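The WIL6210_TRACING option above gates the tracepoints introduced later in this series (trace.h, trace.c, and the wil_err/wil_info wrappers in debug.c). Those files follow the kernel's standard tracepoint layout: one header declares the events, exactly one translation unit defines them via CREATE_TRACE_POINTS, and the Makefile hunk that follows adds -I$(src) so the trace framework's relative TRACE_INCLUDE_PATH can locate the header. A minimal sketch of that layout, using a hypothetical mydrv event (the names here are illustrative, not from this patch):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM mydrv

#if !defined(MYDRV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define MYDRV_TRACE_H

#include <linux/tracepoint.h>

/* Record one interrupt cause word; printed as hex from the ring buffer. */
TRACE_EVENT(mydrv_irq,
	TP_PROTO(u32 cause),
	TP_ARGS(cause),
	TP_STRUCT__entry(
		__field(u32, cause)
	),
	TP_fast_assign(
		__entry->cause = cause;
	),
	TP_printk("cause 0x%08x", __entry->cause)
);

#endif /* MYDRV_TRACE_H || TRACE_HEADER_MULTI_READ */

/* This part must be outside the include guard; the header is assumed
 * to live next to the sources as trace.h. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>

A single .c file then does '#define CREATE_TRACE_POINTS' before including the header, and call sites simply invoke trace_mydrv_irq(isr). When the Kconfig symbol is off, the header can stub each trace_*() call out as an empty static inline so callers stay unconditional, which is exactly what the wil6210 trace.h later in this chunk does.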
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index d288eea0a26a..f891d514d881 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -1,15 +1,20 @@ obj-$(CONFIG_WIL6210) += wil6210.o -wil6210-objs := main.o -wil6210-objs += netdev.o -wil6210-objs += cfg80211.o -wil6210-objs += pcie_bus.o -wil6210-objs += debugfs.o -wil6210-objs += wmi.o -wil6210-objs += interrupt.o -wil6210-objs += txrx.o +wil6210-y := main.o +wil6210-y += netdev.o +wil6210-y += cfg80211.o +wil6210-y += pcie_bus.o +wil6210-y += debugfs.o +wil6210-y += wmi.o +wil6210-y += interrupt.o +wil6210-y += txrx.o +wil6210-y += debug.o +wil6210-$(CONFIG_WIL6210_TRACING) += trace.o ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) subdir-ccflags-y += -Werror endif +# for tracing framework to find trace.h +CFLAGS_trace.o := -I$(src) + subdir-ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index c5d4a87abaaf..61c302a6bdea 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -322,12 +322,16 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, * FW don't support scan after connection attempt */ set_bit(wil_status_dontscan, &wil->status); + set_bit(wil_status_fwconnecting, &wil->status); rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn)); if (rc == 0) { /* Connect can take lots of time */ mod_timer(&wil->connect_timer, jiffies + msecs_to_jiffies(2000)); + } else { + clear_bit(wil_status_dontscan, &wil->status); + clear_bit(wil_status_fwconnecting, &wil->status); } out: @@ -398,6 +402,30 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy, return 0; } +static int wil_fix_bcon(struct wil6210_priv *wil, + struct cfg80211_beacon_data *bcon) +{ + struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; + size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + int rc = 0; + + if (bcon->probe_resp_len <= hlen) + return 0; + + if (!bcon->proberesp_ies) { + bcon->proberesp_ies = f->u.probe_resp.variable; + bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; + rc = 1; + } + if (!bcon->assocresp_ies) { + bcon->assocresp_ies = f->u.probe_resp.variable; + bcon->assocresp_ies_len = bcon->probe_resp_len - hlen; + rc = 1; + } + + return rc; +} + static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ap_settings *info) @@ -419,10 +447,18 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, info->ssid, info->ssid_len); + if (wil_fix_bcon(wil, bcon)) + wil_dbg_misc(wil, "Fixed bcon\n"); + rc = wil_reset(wil); if (rc) return rc; + /* Rx VRING. */ + rc = wil_rx_init(wil); + if (rc) + return rc; + rc = wmi_set_ssid(wil, info->ssid_len, info->ssid); if (rc) return rc; @@ -451,8 +487,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, if (rc) return rc; - /* Rx VRING. After MAC and beacon */ - rc = wil_rx_init(wil); netif_carrier_on(ndev); diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c new file mode 100644 index 000000000000..9eeabf4a5879 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/debug.c @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wil6210.h" +#include "trace.h" + +int wil_err(struct wil6210_priv *wil, const char *fmt, ...) +{ + struct net_device *ndev = wil_to_ndev(wil); + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + int ret; + + va_start(args, fmt); + vaf.va = &args; + ret = netdev_err(ndev, "%pV", &vaf); + trace_wil6210_log_err(&vaf); + va_end(args); + + return ret; +} + +int wil_info(struct wil6210_priv *wil, const char *fmt, ...) +{ + struct net_device *ndev = wil_to_ndev(wil); + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + int ret; + + va_start(args, fmt); + vaf.va = &args; + ret = netdev_info(ndev, "%pV", &vaf); + trace_wil6210_log_info(&vaf); + va_end(args); + + return ret; +} + +int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + trace_wil6210_log_dbg(&vaf); + va_end(args); + + return 0; +} diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 727b1f53e6ad..e8308ec30970 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -418,9 +418,15 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) if (skb) { unsigned char printbuf[16 * 3 + 2]; int i = 0; - int len = skb_headlen(skb); + int len = le16_to_cpu(d->dma.length); void *p = skb->data; + if (len != skb_headlen(skb)) { + seq_printf(s, "!!! 
len: desc = %d skb = %d\n", + len, skb_headlen(skb)); + len = min_t(int, len, skb_headlen(skb)); + } + seq_printf(s, " len = %d\n", len); while (i < len) { diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index e3c1e7684f9c..8205d3e4ab66 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -17,6 +17,7 @@ #include <linux/interrupt.h> #include "wil6210.h" +#include "trace.h" /** * Theory of operation: @@ -103,14 +104,14 @@ static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil) clear_bit(wil_status_irqen, &wil->status); } -static void wil6210_unmask_irq_tx(struct wil6210_priv *wil) +void wil6210_unmask_irq_tx(struct wil6210_priv *wil) { iowrite32(WIL6210_IMC_TX, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, IMC)); } -static void wil6210_unmask_irq_rx(struct wil6210_priv *wil) +void wil6210_unmask_irq_rx(struct wil6210_priv *wil) { iowrite32(WIL6210_IMC_RX, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) + @@ -168,6 +169,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) HOSTADDR(RGF_DMA_EP_RX_ICR) + offsetof(struct RGF_ICR, ICR)); + trace_wil6210_irq_rx(isr); wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); if (!isr) { @@ -180,13 +182,14 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) { wil_dbg_irq(wil, "RX done\n"); isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE; - wil_rx_handle(wil); + wil_dbg_txrx(wil, "NAPI schedule\n"); + napi_schedule(&wil->napi_rx); } if (isr) wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr); - wil6210_unmask_irq_rx(wil); + /* Rx IRQ will be enabled when NAPI processing finished */ return IRQ_HANDLED; } @@ -198,6 +201,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, ICR)); + trace_wil6210_irq_tx(isr); wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); if (!isr) { @@ -208,23 +212,17 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) wil6210_mask_irq_tx(wil); if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) { - uint i; wil_dbg_irq(wil, "TX done\n"); + napi_schedule(&wil->napi_tx); isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; - for (i = 0; i < 24; i++) { - u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i); - if (isr & mask) { - isr &= ~mask; - wil_dbg_irq(wil, "TX done(%i)\n", i); - wil_tx_complete(wil, i); - } - } + /* clear also all VRING interrupts */ + isr &= ~(BIT(25) - 1UL); } if (isr) wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); - wil6210_unmask_irq_tx(wil); + /* Tx IRQ will be enabled when NAPI processing finished */ return IRQ_HANDLED; } @@ -256,6 +254,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, ICR)); + trace_wil6210_irq_misc(isr); wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr); if (!isr) { @@ -301,6 +300,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) struct wil6210_priv *wil = cookie; u32 isr = wil->isr_misc; + trace_wil6210_irq_misc_thread(isr); wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); if (isr & ISR_MISC_FW_ERROR) { @@ -408,6 +408,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) if (wil6210_debug_irq_mask(wil, pseudo_cause)) return IRQ_NONE; + trace_wil6210_irq_pseudo(pseudo_cause); wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause); wil6210_mask_irq_pseudo(wil); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index a0478e2f6868..0a2844c48a60 100644 --- 
a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -56,27 +56,21 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid) { uint i; struct net_device *ndev = wil_to_ndev(wil); - struct wireless_dev *wdev = wil->wdev; wil_dbg_misc(wil, "%s()\n", __func__); wil_link_off(wil); - clear_bit(wil_status_fwconnected, &wil->status); - - switch (wdev->sme_state) { - case CFG80211_SME_CONNECTED: - cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE, + if (test_bit(wil_status_fwconnected, &wil->status)) { + clear_bit(wil_status_fwconnected, &wil->status); + cfg80211_disconnected(ndev, + WLAN_STATUS_UNSPECIFIED_FAILURE, NULL, 0, GFP_KERNEL); - break; - case CFG80211_SME_CONNECTING: + } else if (test_bit(wil_status_fwconnecting, &wil->status)) { cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); - break; - default: - break; } - + clear_bit(wil_status_fwconnecting, &wil->status); for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) wil_vring_fini_tx(wil, i); @@ -292,41 +286,36 @@ static int __wil_up(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; - struct ieee80211_channel *channel = wdev->preset_chandef.chan; int rc; - int bi; - u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); rc = wil_reset(wil); if (rc) return rc; - /* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */ - wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC); + /* Rx VRING. After MAC and beacon */ + rc = wil_rx_init(wil); + if (rc) + return rc; + switch (wdev->iftype) { case NL80211_IFTYPE_STATION: wil_dbg_misc(wil, "type: STATION\n"); - bi = 0; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_AP: wil_dbg_misc(wil, "type: AP\n"); - bi = 100; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_P2P_CLIENT: wil_dbg_misc(wil, "type: P2P_CLIENT\n"); - bi = 0; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_P2P_GO: wil_dbg_misc(wil, "type: P2P_GO\n"); - bi = 100; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_MONITOR: wil_dbg_misc(wil, "type: Monitor\n"); - bi = 0; ndev->type = ARPHRD_IEEE80211_RADIOTAP; /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */ break; @@ -334,36 +323,12 @@ static int __wil_up(struct wil6210_priv *wil) return -EOPNOTSUPP; } - /* Apply profile in the following order: */ - /* SSID and channel for the AP */ - switch (wdev->iftype) { - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_P2P_GO: - if (wdev->ssid_len == 0) { - wil_err(wil, "SSID not set\n"); - return -EINVAL; - } - rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid); - if (rc) - return rc; - break; - default: - break; - } - /* MAC address - pre-requisite for other commands */ wmi_set_mac_address(wil, ndev->dev_addr); - /* Set up beaconing if required. */ - if (bi > 0) { - rc = wmi_pcp_start(wil, bi, wmi_nettype, - (channel ? channel->hw_value : 0)); - if (rc) - return rc; - } - /* Rx VRING. 
After MAC and beacon */ - wil_rx_init(wil); + napi_enable(&wil->napi_rx); + napi_enable(&wil->napi_tx); return 0; } @@ -381,6 +346,9 @@ int wil_up(struct wil6210_priv *wil) static int __wil_down(struct wil6210_priv *wil) { + napi_disable(&wil->napi_rx); + napi_disable(&wil->napi_tx); + if (wil->scan_request) { cfg80211_scan_done(wil->scan_request, true); wil->scan_request = NULL; } diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 098a8ec6b841..29dd1e58cb17 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -40,6 +40,55 @@ static const struct net_device_ops wil_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; +static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) +{ + struct wil6210_priv *wil = container_of(napi, struct wil6210_priv, + napi_rx); + int quota = budget; + int done; + + wil_rx_handle(wil, &quota); + done = budget - quota; + + if (done <= 1) { /* burst ends - only one packet processed */ + napi_complete(napi); + wil6210_unmask_irq_rx(wil); + wil_dbg_txrx(wil, "NAPI RX complete\n"); + } + + wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done); + + return done; +} + +static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget) +{ + struct wil6210_priv *wil = container_of(napi, struct wil6210_priv, + napi_tx); + int tx_done = 0; + uint i; + + /* always process ALL Tx complete, regardless budget - it is fast */ + for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + struct vring *vring = &wil->vring_tx[i]; + + if (!vring->va) + continue; + + tx_done += wil_tx_complete(wil, i); + } + + if (tx_done <= 1) { /* burst ends - only one packet processed */ + napi_complete(napi); + wil6210_unmask_irq_tx(wil); + wil_dbg_txrx(wil, "NAPI TX complete\n"); + } + + wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done); + + return min(tx_done, budget); +} + void *wil_if_alloc(struct device *dev, void __iomem *csr) { struct net_device *ndev; @@ -81,6 +130,11 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr) SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; + netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx, + WIL6210_NAPI_BUDGET); + netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx, + WIL6210_NAPI_BUDGET); + wil_link_off(wil); return wil; diff --git a/drivers/net/wireless/ath/wil6210/trace.c b/drivers/net/wireless/ath/wil6210/trace.c new file mode 100644 index 000000000000..cd2534b9c5aa --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/trace.c @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */ + +#include <linux/module.h> + +#define CREATE_TRACE_POINTS +#include "trace.h" diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h new file mode 100644 index 000000000000..eff1239be53a --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/trace.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM wil6210 +#if !defined(WIL6210_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define WIL6210_TRACE_H + +#include <linux/tracepoint.h> +#include "wil6210.h" +#include "txrx.h" + +/* create empty functions when tracing is disabled */ +#if !defined(CONFIG_WIL6210_TRACING) || defined(__CHECKER__) + +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(...) +#undef DEFINE_EVENT +#define DEFINE_EVENT(evt_class, name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */ + +DECLARE_EVENT_CLASS(wil6210_wmi, + TP_PROTO(u16 id, void *buf, u16 buf_len), + + TP_ARGS(id, buf, buf_len), + + TP_STRUCT__entry( + __field(u16, id) + __field(u16, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->id = id; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "id 0x%04x len %d", + __entry->id, __entry->buf_len + ) +); + +DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd, + TP_PROTO(u16 id, void *buf, u16 buf_len), + TP_ARGS(id, buf, buf_len) +); + +DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event, + TP_PROTO(u16 id, void *buf, u16 buf_len), + TP_ARGS(id, buf, buf_len) +); + +#define WIL6210_MSG_MAX (200) + +DECLARE_EVENT_CLASS(wil6210_log_event, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf), + TP_STRUCT__entry( + __dynamic_array(char, msg, WIL6210_MSG_MAX) + ), + TP_fast_assign( + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + WIL6210_MSG_MAX, + vaf->fmt, + *vaf->va) >= WIL6210_MSG_MAX); + ), + TP_printk("%s", __get_str(msg)) +); + +DEFINE_EVENT(wil6210_log_event, wil6210_log_err, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(wil6210_log_event, wil6210_log_info, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(wil6210_log_event, wil6210_log_dbg, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +#define wil_pseudo_irq_cause(x) __print_flags(x, "|", \ + {BIT_DMA_PSEUDO_CAUSE_RX, "Rx" }, \ + {BIT_DMA_PSEUDO_CAUSE_TX, "Tx" }, \ + {BIT_DMA_PSEUDO_CAUSE_MISC, "Misc" }) + +TRACE_EVENT(wil6210_irq_pseudo, + TP_PROTO(u32 x), + TP_ARGS(x), + TP_STRUCT__entry( + __field(u32, x) + ), + TP_fast_assign( + __entry->x = x; + ), + TP_printk("cause 0x%08x : %s", __entry->x, + 
wil_pseudo_irq_cause(__entry->x)) +); + +DECLARE_EVENT_CLASS(wil6210_irq, + TP_PROTO(u32 x), + TP_ARGS(x), + TP_STRUCT__entry( + __field(u32, x) + ), + TP_fast_assign( + __entry->x = x; + ), + TP_printk("cause 0x%08x", __entry->x) +); + +DEFINE_EVENT(wil6210_irq, wil6210_irq_rx, + TP_PROTO(u32 x), + TP_ARGS(x) +); + +DEFINE_EVENT(wil6210_irq, wil6210_irq_tx, + TP_PROTO(u32 x), + TP_ARGS(x) +); + +DEFINE_EVENT(wil6210_irq, wil6210_irq_misc, + TP_PROTO(u32 x), + TP_ARGS(x) +); + +DEFINE_EVENT(wil6210_irq, wil6210_irq_misc_thread, + TP_PROTO(u32 x), + TP_ARGS(x) +); + +TRACE_EVENT(wil6210_rx, + TP_PROTO(u16 index, struct vring_rx_desc *d), + TP_ARGS(index, d), + TP_STRUCT__entry( + __field(u16, index) + __field(unsigned int, len) + __field(u8, mid) + __field(u8, cid) + __field(u8, tid) + __field(u8, type) + __field(u8, subtype) + __field(u16, seq) + __field(u8, mcs) + ), + TP_fast_assign( + __entry->index = index; + __entry->len = d->dma.length; + __entry->mid = wil_rxdesc_mid(d); + __entry->cid = wil_rxdesc_cid(d); + __entry->tid = wil_rxdesc_tid(d); + __entry->type = wil_rxdesc_ftype(d); + __entry->subtype = wil_rxdesc_subtype(d); + __entry->seq = wil_rxdesc_seq(d); + __entry->mcs = wil_rxdesc_mcs(d); + ), + TP_printk("index %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x" + " type 0x%1x subtype 0x%1x", __entry->index, __entry->len, + __entry->mid, __entry->cid, __entry->tid, __entry->mcs, + __entry->seq, __entry->type, __entry->subtype) +); + +TRACE_EVENT(wil6210_tx, + TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags), + TP_ARGS(vring, index, len, frags), + TP_STRUCT__entry( + __field(u8, vring) + __field(u8, frags) + __field(u16, index) + __field(unsigned int, len) + ), + TP_fast_assign( + __entry->vring = vring; + __entry->frags = frags; + __entry->index = index; + __entry->len = len; + ), + TP_printk("vring %d index %d len %d frags %d", + __entry->vring, __entry->index, __entry->len, __entry->frags) +); + +TRACE_EVENT(wil6210_tx_done, + TP_PROTO(u8 vring, u16 index, unsigned int len, u8 err), + TP_ARGS(vring, index, len, err), + TP_STRUCT__entry( + __field(u8, vring) + __field(u8, err) + __field(u16, index) + __field(unsigned int, len) + ), + TP_fast_assign( + __entry->vring = vring; + __entry->index = index; + __entry->len = len; + __entry->err = err; + ), + TP_printk("vring %d index %d len %d err 0x%02x", + __entry->vring, __entry->index, __entry->len, + __entry->err) +); + +#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/ + +#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) +/* we don't want to use include/trace/events */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> +#endif /* defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) */ diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 797024507c71..e1c492b9dfef 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -22,6 +22,7 @@ #include "wil6210.h" #include "wmi.h" #include "txrx.h" +#include "trace.h" static bool rtap_include_phy_info; module_param(rtap_include_phy_info, bool, S_IRUGO); @@ -89,8 +90,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) * we can use any */ for (i = 0; i < vring->size; i++) { - volatile struct vring_tx_desc *d = &(vring->va[i].tx); - d->dma.status = TX_DMA_STATUS_DU; + volatile struct vring_tx_desc *_d = &(vring->va[i].tx); + _d->dma.status = TX_DMA_STATUS_DU; } wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size, @@ -106,30 +107,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, size_t sz = vring->size * sizeof(vring->va[0]); while (!wil_vring_is_empty(vring)) { + dma_addr_t pa; + struct sk_buff *skb; + u16 dmalen; + if (tx) { - volatile struct vring_tx_desc *d = + struct vring_tx_desc dd, *d = &dd; + volatile struct vring_tx_desc *_d = &vring->va[vring->swtail].tx; - dma_addr_t pa = d->dma.addr_low | - ((u64)d->dma.addr_high << 32); - struct sk_buff *skb = vring->ctx[vring->swtail]; + + *d = *_d; + pa = wil_desc_addr(&d->dma.addr); + dmalen = le16_to_cpu(d->dma.length); + skb = vring->ctx[vring->swtail]; if (skb) { - dma_unmap_single(dev, pa, d->dma.length, + dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); dev_kfree_skb_any(skb); vring->ctx[vring->swtail] = NULL; } else { - dma_unmap_page(dev, pa, d->dma.length, + dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); } vring->swtail = wil_vring_next_tail(vring); } else { /* rx */ - volatile struct vring_rx_desc *d = + struct vring_rx_desc dd, *d = &dd; + volatile struct vring_rx_desc *_d = &vring->va[vring->swtail].rx; - dma_addr_t pa = d->dma.addr_low | - ((u64)d->dma.addr_high << 32); - struct sk_buff *skb = vring->ctx[vring->swhead]; - dma_unmap_single(dev, pa, d->dma.length, - DMA_FROM_DEVICE); + + *d = *_d; + pa = wil_desc_addr(&d->dma.addr); + dmalen = le16_to_cpu(d->dma.length); + skb = vring->ctx[vring->swhead]; + dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); kfree_skb(skb); wil_vring_advance_head(vring, 1); } @@ -151,7 +161,8 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, { struct device *dev = wil_to_dev(wil); unsigned int sz = RX_BUF_LEN; - volatile struct vring_rx_desc *d = &(vring->va[i].rx); + struct vring_rx_desc dd, *d = &dd; + volatile struct vring_rx_desc *_d = &(vring->va[i].rx); dma_addr_t pa; /* TODO align */ @@ -169,13 +180,13 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, } d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT; - d->dma.addr_low = lower_32_bits(pa); - d->dma.addr_high = (u16)upper_32_bits(pa); + wil_desc_addr_set(&d->dma.addr, pa); /* ip_length don't care */ /* b11 don't care */ /* error don't care */ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ - d->dma.length = sz; + d->dma.length = cpu_to_le16(sz); + *_d = *d; vring->ctx[i] = skb; return 0; @@ -321,11 +332,12 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, { struct device *dev = wil_to_dev(wil); struct net_device *ndev = wil_to_ndev(wil); -
volatile struct vring_rx_desc *d; - struct vring_rx_desc *d1; + volatile struct vring_rx_desc *_d; + struct vring_rx_desc *d; struct sk_buff *skb; dma_addr_t pa; unsigned int sz = RX_BUF_LEN; + u16 dmalen; u8 ftype; u8 ds_bits; @@ -334,32 +346,44 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, if (wil_vring_is_empty(vring)) return NULL; - d = &(vring->va[vring->swhead].rx); - if (!(d->dma.status & RX_DMA_STATUS_DU)) { + _d = &(vring->va[vring->swhead].rx); + if (!(_d->dma.status & RX_DMA_STATUS_DU)) { /* it is not error, we just reached end of Rx done area */ return NULL; } - pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32); skb = vring->ctx[vring->swhead]; + d = wil_skb_rxdesc(skb); + *d = *_d; + pa = wil_desc_addr(&d->dma.addr); + vring->ctx[vring->swhead] = NULL; + wil_vring_advance_head(vring, 1); + dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE); - skb_trim(skb, d->dma.length); + dmalen = le16_to_cpu(d->dma.length); + + trace_wil6210_rx(vring->swhead, d); + wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen); + wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, + (const void *)d, sizeof(*d), false); + + if (dmalen > sz) { + wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); + kfree_skb(skb); + return NULL; + } + skb_trim(skb, dmalen); + + wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb_headlen(skb), false); - d1 = wil_skb_rxdesc(skb); - *d1 = *d; - wil->stats.last_mcs_rx = wil_rxdesc_mcs(d1); + wil->stats.last_mcs_rx = wil_rxdesc_mcs(d); /* use radiotap header only if required */ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) wil_rx_add_radiotap_header(wil, skb); - wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length); - wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, - (const void *)d, sizeof(*d), false); - - wil_vring_advance_head(vring, 1); - /* no extra checks if in sniffer mode */ if (ndev->type != ARPHRD_ETHER) return skb; @@ -368,7 +392,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, * Driver should recognize it by frame type, that is found * in Rx descriptor. If type is not data, it is 802.11 frame as is */ - ftype = wil_rxdesc_ftype(d1) << 2; + ftype = wil_rxdesc_ftype(d) << 2; if (ftype != IEEE80211_FTYPE_DATA) { wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype); /* TODO: process it */ @@ -383,7 +407,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, return NULL; } - ds_bits = wil_rxdesc_ds_bits(d1); + ds_bits = wil_rxdesc_ds_bits(d); if (ds_bits == 1) { /* * HW bug - in ToDS mode, i.e. Rx on AP side, @@ -425,6 +449,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) /* * Pass Rx packet to the netif. Update statistics. + * Called in softirq context (NAPI poll). */ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) { @@ -433,10 +458,7 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) skb_orphan(skb); - if (in_interrupt()) - rc = netif_rx(skb); - else - rc = netif_rx_ni(skb); + rc = netif_receive_skb(skb); if (likely(rc == NET_RX_SUCCESS)) { ndev->stats.rx_packets++; @@ -450,9 +472,9 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) /** * Proceed all completed skb's from Rx VRING * - * Safe to call from IRQ + * Safe to call from NAPI poll, i.e. 
softirq with interrupts enabled */ -void wil_rx_handle(struct wil6210_priv *wil) +void wil_rx_handle(struct wil6210_priv *wil, int *quota) { struct net_device *ndev = wil_to_ndev(wil); struct vring *v = &wil->vring_rx; @@ -463,9 +485,8 @@ void wil_rx_handle(struct wil6210_priv *wil) return; } wil_dbg_txrx(wil, "%s()\n", __func__); - while (NULL != (skb = wil_vring_reap_rx(wil, v))) { - wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, - skb->data, skb_headlen(skb), false); + while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) { + (*quota)--; if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { skb->dev = ndev; @@ -600,17 +621,15 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil, return NULL; } -static int wil_tx_desc_map(volatile struct vring_tx_desc *d, - dma_addr_t pa, u32 len) +static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len) { - d->dma.addr_low = lower_32_bits(pa); - d->dma.addr_high = (u16)upper_32_bits(pa); + wil_desc_addr_set(&d->dma.addr, pa); d->dma.ip_length = 0; /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/ d->dma.b11 = 0/*14 | BIT(7)*/; d->dma.error = 0; d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ - d->dma.length = len; + d->dma.length = cpu_to_le16((u16)len); d->dma.d0 = 0; d->mac.d[0] = 0; d->mac.d[1] = 0; @@ -630,7 +649,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, struct sk_buff *skb) { struct device *dev = wil_to_dev(wil); - volatile struct vring_tx_desc *d; + struct vring_tx_desc dd, *d = &dd; + volatile struct vring_tx_desc *_d; u32 swhead = vring->swhead; int avail = wil_vring_avail_tx(vring); int nr_frags = skb_shinfo(skb)->nr_frags; @@ -648,7 +668,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, 1 + nr_frags); return -ENOMEM; } - d = &(vring->va[i].tx); + _d = &(vring->va[i].tx); /* FIXME FW can accept only unicast frames for the peer */ memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN); @@ -667,25 +687,30 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, wil_tx_desc_map(d, pa, skb_headlen(skb)); d->mac.d[2] |= ((nr_frags + 1) << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); + if (nr_frags) + *_d = *d; + /* middle segments */ for (f = 0; f < nr_frags; f++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f]; int len = skb_frag_size(frag); i = (swhead + f + 1) % vring->size; - d = &(vring->va[i].tx); + _d = &(vring->va[i].tx); pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, pa))) goto dma_error; wil_tx_desc_map(d, pa, len); vring->ctx[i] = NULL; + *_d = *d; } /* for the last seg only */ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS); d->dma.d0 |= BIT(9); /* BUG: undocumented bit */ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS); + *_d = *d; wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); @@ -693,6 +718,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /* advance swhead */ wil_vring_advance_head(vring, nr_frags + 1); wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead); + trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail)); /* hold reference to skb * to prevent skb release before accounting @@ -705,14 +731,18 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /* unmap what we have mapped */ /* Note: increment @f to
operate with positive index */ for (f++; f > 0; f--) { + u16 dmalen; + i = (swhead + f) % vring->size; - d = &(vring->va[i].tx); - d->dma.status = TX_DMA_STATUS_DU; - pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32); + _d = &(vring->va[i].tx); + *d = *_d; + _d->dma.status = TX_DMA_STATUS_DU; + pa = wil_desc_addr(&d->dma.addr); + dmalen = le16_to_cpu(d->dma.length); if (vring->ctx[i]) - dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE); + dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); else - dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE); + dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); } return -EINVAL; @@ -738,18 +768,16 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) wil_err(wil, "Xmit in monitor mode not supported\n"); goto drop; } - if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { - rc = wmi_tx_eapol(wil, skb); - } else { - /* find vring */ - vring = wil_find_tx_vring(wil, skb); - if (!vring) { - wil_err(wil, "No Tx VRING available\n"); - goto drop; - } - /* set up vring entry */ - rc = wil_tx_vring(wil, vring, skb); + + /* find vring */ + vring = wil_find_tx_vring(wil, skb); + if (!vring) { + wil_err(wil, "No Tx VRING available\n"); + goto drop; } + /* set up vring entry */ + rc = wil_tx_vring(wil, vring, skb); + switch (rc) { case 0: /* statistics will be updated on the tx_complete */ @@ -761,7 +789,6 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) break; /* goto drop; */ } drop: - netif_tx_stop_all_queues(ndev); ndev->stats.tx_dropped++; dev_kfree_skb_any(skb); @@ -771,41 +798,48 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) /** * Clean up transmitted skb's from the Tx VRING * + * Return number of descriptors cleared + * * Safe to call from IRQ */ -void wil_tx_complete(struct wil6210_priv *wil, int ringid) +int wil_tx_complete(struct wil6210_priv *wil, int ringid) { struct net_device *ndev = wil_to_ndev(wil); struct device *dev = wil_to_dev(wil); struct vring *vring = &wil->vring_tx[ringid]; + int done = 0; if (!vring->va) { wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid); - return; + return 0; } wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); while (!wil_vring_is_empty(vring)) { - volatile struct vring_tx_desc *d1 = + volatile struct vring_tx_desc *_d = &vring->va[vring->swtail].tx; struct vring_tx_desc dd, *d = &dd; dma_addr_t pa; struct sk_buff *skb; + u16 dmalen; - dd = *d1; + *d = *_d; if (!(d->dma.status & TX_DMA_STATUS_DU)) break; + dmalen = le16_to_cpu(d->dma.length); + trace_wil6210_tx_done(ringid, vring->swtail, dmalen, + d->dma.error); wil_dbg_txrx(wil, "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n", - vring->swtail, d->dma.length, d->dma.status, + vring->swtail, dmalen, d->dma.status, d->dma.error); wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); - pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32); + pa = wil_desc_addr(&d->dma.addr); skb = vring->ctx[vring->swtail]; if (skb) { if (d->dma.error == 0) { @@ -815,18 +849,21 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid) ndev->stats.tx_errors++; } - dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE); + dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); dev_kfree_skb_any(skb); vring->ctx[vring->swtail] = NULL; } else { - dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE); + dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); } - d->dma.addr_low = 0; - d->dma.addr_high = 0; + d->dma.addr.addr_low = 0; + d->dma.addr.addr_high = 0; d->dma.length = 0;
d->dma.status = TX_DMA_STATUS_DU; vring->swtail = wil_vring_next_tail(vring); + done++; } if (wil_vring_avail_tx(vring) > vring->size/4) netif_tx_wake_all_queues(wil_to_ndev(wil)); + + return done; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index adef12fb2aee..23c0781cdb93 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -27,6 +27,28 @@ #define WIL6210_RTAP_SIZE (128) /* Tx/Rx path */ + +/* + * Common representation of physical address in Vring + */ +struct vring_dma_addr { + __le32 addr_low; + __le16 addr_high; +} __packed; + +static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr) +{ + return le32_to_cpu(addr->addr_low) | + ((u64)le16_to_cpu(addr->addr_high) << 32); +} + +static inline void wil_desc_addr_set(struct vring_dma_addr *addr, + dma_addr_t pa) +{ + addr->addr_low = cpu_to_le32(lower_32_bits(pa)); + addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa)); +} + /* * Tx descriptor - MAC part * [dword 0] @@ -216,13 +238,12 @@ struct vring_tx_mac { struct vring_tx_dma { u32 d0; - u32 addr_low; - u16 addr_high; + struct vring_dma_addr addr; u8 ip_length; u8 b11; /* 0..6: mac_length; 7:ip_version */ u8 error; /* 0..2: err; 3..7: reserved; */ u8 status; /* 0: used; 1..7; reserved */ - u16 length; + __le16 length; } __packed; /* @@ -315,13 +336,12 @@ struct vring_rx_mac { struct vring_rx_dma { u32 d0; - u32 addr_low; - u16 addr_high; + struct vring_dma_addr addr; u8 ip_length; u8 b11; u8 error; u8 status; - u16 length; + __le16 length; } __packed; struct vring_tx_desc { diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 8f76ecd8a7e5..44fdab51de7e 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -34,9 +34,11 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL6210_MEM_SIZE (2*1024*1024UL) -#define WIL6210_RX_RING_SIZE (128) -#define WIL6210_TX_RING_SIZE (128) -#define WIL6210_MAX_TX_RINGS (24) +#define WIL6210_RX_RING_SIZE (128) +#define WIL6210_TX_RING_SIZE (128) +#define WIL6210_MAX_TX_RINGS (24) /* HW limit */ +#define WIL6210_MAX_CID (8) /* HW limit */ +#define WIL6210_NAPI_BUDGET (16) /* arbitrary */ /* Hardware definitions begin */ @@ -184,6 +186,7 @@ struct vring { enum { /* for wil6210_priv.status */ wil_status_fwready = 0, + wil_status_fwconnecting, wil_status_fwconnected, wil_status_dontscan, wil_status_reset_done, @@ -239,6 +242,8 @@ struct wil6210_priv { * - consumed in thread by wmi_event_worker */ spinlock_t wmi_ev_lock; + struct napi_struct napi_rx; + struct napi_struct napi_tx; /* DMA related */ struct vring vring_rx; struct vring vring_tx[WIL6210_MAX_TX_RINGS]; @@ -267,9 +272,13 @@ struct wil6210_priv { #define wil_to_ndev(i) (wil_to_wdev(i)->netdev) #define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr)) -#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg) -#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg) -#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg) +int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...); +int wil_err(struct wil6210_priv *wil, const char *fmt, ...); +int wil_info(struct wil6210_priv *wil, const char *fmt, ...); +#define wil_dbg(wil, fmt, arg...) do { \ + netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \ + wil_dbg_trace(wil, fmt, ##arg); \ +} while (0) #define wil_dbg_irq(wil, fmt, arg...) 
wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg) #define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg) @@ -320,7 +329,6 @@ int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid); int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid); int wmi_set_channel(struct wil6210_priv *wil, int channel); int wmi_get_channel(struct wil6210_priv *wil, int *channel); -int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb); int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, const void *mac_addr); int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, @@ -356,10 +364,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, void wil_vring_fini_tx(struct wil6210_priv *wil, int id); netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); -void wil_tx_complete(struct wil6210_priv *wil, int ringid); +int wil_tx_complete(struct wil6210_priv *wil, int ringid); +void wil6210_unmask_irq_tx(struct wil6210_priv *wil); /* RX API */ -void wil_rx_handle(struct wil6210_priv *wil); +void wil_rx_handle(struct wil6210_priv *wil, int *quota); +void wil6210_unmask_irq_rx(struct wil6210_priv *wil); int wil_iftype_nl2wmi(enum nl80211_iftype type); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 45b04e383f9a..dc8059ad4bab 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -20,6 +20,7 @@ #include "wil6210.h" #include "txrx.h" #include "wmi.h" +#include "trace.h" /** * WMI event receiving - theory of operations @@ -74,10 +75,11 @@ static const struct { {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */ {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */ {0x880000, 0x88a000, 0x880000}, /* various RGF */ - {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */ + {0x8c0000, 0x949000, 0x8c0000}, /* trivial mapping for upper area */ /* * 920000..930000 ucode code RAM * 930000..932000 ucode data RAM + * 932000..949000 back-door debug data */ }; @@ -246,6 +248,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) iowrite32(r->head = next_head, wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head)); + trace_wil6210_wmi_cmd(cmdid, buf, len); + /* interrupt to FW */ iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT); @@ -311,8 +315,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n", data->info.channel, data->info.mcs, data->info.snr); - wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len, - le16_to_cpu(data->info.stype)); + wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len, + le16_to_cpu(fc)); wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", data->info.qid, data->info.mid, data->info.cid); @@ -406,7 +410,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) if ((wdev->iftype == NL80211_IFTYPE_STATION) || (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { - if (wdev->sme_state != CFG80211_SME_CONNECTING) { + if (!test_bit(wil_status_fwconnecting, &wil->status)) { wil_err(wil, "Not in connecting state\n"); return; } @@ -430,6 +434,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); } + clear_bit(wil_status_fwconnecting, &wil->status); set_bit(wil_status_fwconnected, &wil->status); /* FIXME FW can transmit only ucast frames to peer */ @@ 
-635,8 +640,9 @@ void wmi_recv_cmd(struct wil6210_priv *wil) hdr.flags); if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { - wil_dbg_wmi(wil, "WMI event 0x%04x\n", - evt->event.wmi.id); + u16 id = le16_to_cpu(evt->event.wmi.id); + wil_dbg_wmi(wil, "WMI event 0x%04x\n", id); + trace_wil6210_wmi_event(id, &evt->event.wmi, len); } wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1, &evt->event.hdr, sizeof(hdr) + len, true); @@ -724,7 +730,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan) .bcon_interval = cpu_to_le16(bi), .network_type = wmi_nettype, .disable_sec_offload = 1, - .channel = chan, + .channel = chan - 1, }; struct { struct wil6210_mbox_hdr_wmi wmi; @@ -734,8 +740,12 @@ if (!wil->secure_pcp) cmd.disable_sec = 1; + /* + * Processing time may be long; in case of a secure AP it takes about + * 3500 ms for the FW to start the AP + */ rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd), - WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100); + WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000); if (rc) return rc; @@ -829,40 +839,6 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel) return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd)); } -int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb) -{ - struct wmi_eapol_tx_cmd *cmd; - struct ethhdr *eth; - u16 eapol_len = skb->len - ETH_HLEN; - void *eapol = skb->data + ETH_HLEN; - uint i; - int rc; - - skb_set_mac_header(skb, 0); - eth = eth_hdr(skb); - wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest); - for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { - if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0) - goto found_dest; - } - - return -EINVAL; - - found_dest: - /* find out eapol data & len */ - cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL); - if (!cmd) - return -EINVAL; - - memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN); - cmd->eapol_len = cpu_to_le16(eapol_len); - memcpy(cmd->eapol, eapol, eapol_len); - rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len); - kfree(cmd); - - return rc; -} - int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, const void *mac_addr) { diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig index 078e6f3477a9..3f21e0ba39ba 100644 --- a/drivers/net/wireless/b43/Kconfig +++ b/drivers/net/wireless/b43/Kconfig @@ -28,7 +28,7 @@ config B43 config B43_BCMA bool "Support for BCMA bus" - depends on B43 && BCMA + depends on B43 && (BCMA = y || BCMA = B43) default y config B43_BCMA_EXTRA @@ -39,7 +39,7 @@ config B43_BCMA config B43_SSB bool - depends on B43 && SSB + depends on B43 && (SSB = y || SSB = B43) default y # Auto-select SSB PCI-HOST support, if possible @@ -111,6 +111,7 @@ config B43_PIO config B43_PHY_N bool "Support for 802.11n (N-PHY) devices" depends on B43 + default y ---help--- Support for the N-PHY. @@ -132,6 +133,7 @@ config B43_PHY_LP config B43_PHY_HT bool "Support for HT-PHY (high throughput) devices" depends on B43 && B43_BCMA + default y ---help--- Support for the HT-PHY. 
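For context on the wil6210 hunks above: wil_rx_handle() now consumes at most *quota descriptors, wil_tx_complete() returns the number of descriptors it reclaimed, and struct wil6210_priv gains napi_rx/napi_tx contexts with a WIL6210_NAPI_BUDGET of 16. A minimal sketch of an RX poll callback built on these APIs follows; the handler name and its registration via netif_napi_add() are assumptions, since the actual NAPI hookup happens elsewhere in the driver, outside the hunks shown:

	/* Hypothetical poll handler; assumes netif_napi_add(ndev, &wil->napi_rx,
	 * wil6210_netdev_poll_rx, WIL6210_NAPI_BUDGET) was done at init time.
	 */
	static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
	{
		struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
							napi_rx);
		int quota = budget;

		/* wil_rx_handle() decrements *quota once per reaped frame */
		wil_rx_handle(wil, &quota);

		if (quota > 0) {
			/* ring drained before the budget ran out: stop polling
			 * and re-arm the RX interrupt
			 */
			napi_complete(napi);
			wil6210_unmask_irq_rx(wil);
		}

		return budget - quota;
	}

The TX side would be symmetric: sum the per-ring counts returned by wil_tx_complete() against the budget, and re-arm via wil6210_unmask_irq_tx() once all rings are reclaimed.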
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 44fa0cdbf97b..11400b39cf0b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -606,7 +606,8 @@ static int brcmf_sdio_pd_remove(struct platform_device *pdev) static struct platform_driver brcmf_sdio_pd = { .remove = brcmf_sdio_pd_remove, .driver = { - .name = BRCMFMAC_SDIO_PDATA_NAME + .name = BRCMFMAC_SDIO_PDATA_NAME, + .owner = THIS_MODULE, } }; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index 28db9cf39672..86cbfe2c7c6c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -583,6 +583,7 @@ enum brcmf_netif_stop_reason { * @bssidx: index of bss associated with this interface. * @mac_addr: assigned mac address. * @netif_stop: bitmap indicates reason why netif queues are stopped. + * @netif_stop_lock: spinlock for updating netif_stop from multiple sources. * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. * @pend_8021x_wait: used for signalling change in count. */ @@ -598,6 +599,7 @@ struct brcmf_if { s32 bssidx; u8 mac_addr[ETH_ALEN]; u8 netif_stop; + spinlock_t netif_stop_lock; atomic_t pend_8021x_cnt; wait_queue_head_t pend_8021x_wait; }; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c index 59c77aa3b959..dd85401063cb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c @@ -30,6 +30,7 @@ #include "dhd_bus.h" #include "fwsignal.h" #include "dhd_dbg.h" +#include "tracepoint.h" struct brcmf_proto_cdc_dcmd { __le32 cmd; /* dongle command value */ @@ -292,6 +293,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset, h->flags2 = 0; h->data_offset = offset; BDC_SET_IF_IDX(h, ifidx); + trace_brcmf_bdchdr(pktbuf->data); } int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx, @@ -309,6 +311,7 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx, return -EBADE; } + trace_brcmf_bdchdr(pktbuf->data); h = (struct brcmf_proto_bdc_header *)(pktbuf->data); *ifidx = BDC_GET_IF_IDX(h); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c index 202869cd0932..c37b9d68e458 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c @@ -156,8 +156,11 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data, "txs_suppr_core: %u\n" "txs_suppr_ps: %u\n" "txs_tossed: %u\n" + "txs_host_tossed: %u\n" + "bus_flow_block: %u\n" + "fws_flow_block: %u\n" "send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n" - "fifo_credits_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n", + "requested_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n", fwstats->header_pulls, fwstats->header_only_pkt, fwstats->tlv_parse_failed, @@ -176,14 +179,17 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data, fwstats->txs_supp_core, fwstats->txs_supp_ps, fwstats->txs_tossed, + fwstats->txs_host_tossed, + fwstats->bus_flow_block, + fwstats->fws_flow_block, fwstats->send_pkts[0], fwstats->send_pkts[1], fwstats->send_pkts[2], fwstats->send_pkts[3], fwstats->send_pkts[4], - fwstats->fifo_credits_sent[0], - fwstats->fifo_credits_sent[1], - fwstats->fifo_credits_sent[2], - 
fwstats->fifo_credits_sent[3], - fwstats->fifo_credits_sent[4]); + fwstats->requested_sent[0], + fwstats->requested_sent[1], + fwstats->requested_sent[2], + fwstats->requested_sent[3], + fwstats->requested_sent[4]); return simple_read_from_buffer(data, count, ppos, buf, res); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h index 009c87bfd9ae..0af1f5dc583a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h @@ -141,8 +141,7 @@ struct brcmf_fws_stats { u32 header_pulls; u32 pkt2bus; u32 send_pkts[5]; - u32 fifo_credits_sent[5]; - u32 fifo_credits_back[6]; + u32 requested_sent[5]; u32 generic_error; u32 mac_update_failed; u32 mac_ps_update_failed; @@ -158,6 +157,9 @@ struct brcmf_fws_stats { u32 txs_supp_core; u32 txs_supp_ps; u32 txs_tossed; + u32 txs_host_tossed; + u32 bus_flow_block; + u32 fws_flow_block; }; struct brcmf_pub; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 2c593570497c..8c402e7b97eb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -179,7 +179,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, struct brcmf_pub *drvr = ifp->drvr; struct ethhdr *eh; - brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); + brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx); /* Can the device send data? */ if (drvr->bus_if->state != BRCMF_BUS_DATA) { @@ -240,11 +240,15 @@ done: void brcmf_txflowblock_if(struct brcmf_if *ifp, enum brcmf_netif_stop_reason reason, bool state) { + unsigned long flags; + if (!ifp) return; brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n", ifp->bssidx, ifp->netif_stop, reason, state); + + spin_lock_irqsave(&ifp->netif_stop_lock, flags); if (state) { if (!ifp->netif_stop) netif_stop_queue(ifp->ndev); @@ -254,6 +258,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, if (!ifp->netif_stop) netif_wake_queue(ifp->ndev); } + spin_unlock_irqrestore(&ifp->netif_stop_lock, flags); } void brcmf_txflowblock(struct device *dev, bool state) @@ -264,9 +269,14 @@ void brcmf_txflowblock(struct device *dev, bool state) brcmf_dbg(TRACE, "Enter\n"); - for (i = 0; i < BRCMF_MAX_IFS; i++) - brcmf_txflowblock_if(drvr->iflist[i], - BRCMF_NETIF_STOP_REASON_BLOCK_BUS, state); + if (brcmf_fws_fc_active(drvr->fws)) { + brcmf_fws_bus_blocked(drvr, state); + } else { + for (i = 0; i < BRCMF_MAX_IFS; i++) + brcmf_txflowblock_if(drvr->iflist[i], + BRCMF_NETIF_STOP_REASON_BLOCK_BUS, + state); + } } void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) @@ -280,7 +290,7 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) u8 ifidx; int ret; - brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(DATA, "Enter\n"); skb_queue_walk_safe(skb_list, skb, pnext) { skb_unlink(skb, skb_list); @@ -630,7 +640,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) /* set appropriate operations */ ndev->netdev_ops = &brcmf_netdev_ops_pri; - ndev->hard_header_len = ETH_HLEN + drvr->hdrlen; + ndev->hard_header_len += drvr->hdrlen; ndev->ethtool_ops = &brcmf_ethtool_ops; drvr->rxsz = ndev->mtu + ndev->hard_header_len + @@ -779,6 +789,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, ifp->bssidx = bssidx; init_waitqueue_head(&ifp->pend_8021x_wait); + spin_lock_init(&ifp->netif_stop_lock); if (mac_addr != NULL) memcpy(ifp->mac_addr, 
mac_addr, ETH_ALEN); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index d2487518bd2a..6f3d181659ef 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -2369,12 +2369,12 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) } else { ret = 0; } - spin_unlock_bh(&bus->txqlock); if (pktq_len(&bus->txq) >= TXHI) { bus->txoff = true; brcmf_txflowblock(bus->sdiodev->dev, true); } + spin_unlock_bh(&bus->txqlock); #ifdef DEBUG if (pktq_plen(&bus->txq, prec) > qcount[prec]) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index 5352dc1fdf3c..70f70cea7f7d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c @@ -157,11 +157,13 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id) * @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver. * @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue. * @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware. + * @BRCMF_FWS_SKBSTATE_TIM: allocated for TIM update info. */ enum brcmf_fws_skb_state { BRCMF_FWS_SKBSTATE_NEW, BRCMF_FWS_SKBSTATE_DELAYED, - BRCMF_FWS_SKBSTATE_SUPPRESSED + BRCMF_FWS_SKBSTATE_SUPPRESSED, + BRCMF_FWS_SKBSTATE_TIM }; /** @@ -193,9 +195,8 @@ struct brcmf_skbuff_cb { * b[11] - packet sent upon firmware request. * b[10] - packet only contains signalling data. * b[9] - packet is a tx packet. - * b[8] - packet uses FIFO credit (non-pspoll). + * b[8] - packet used requested credit * b[7] - interface in AP mode. - * b[6:4] - AC FIFO number. * b[3:0] - interface index. */ #define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK 0x0800 @@ -204,12 +205,10 @@ struct brcmf_skbuff_cb { #define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT 10 #define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK 0x0200 #define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT 9 -#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK 0x0100 -#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_SHIFT 8 +#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_MASK 0x0100 +#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_SHIFT 8 #define BRCMF_SKB_IF_FLAGS_IF_AP_MASK 0x0080 #define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT 7 -#define BRCMF_SKB_IF_FLAGS_FIFO_MASK 0x0070 -#define BRCMF_SKB_IF_FLAGS_FIFO_SHIFT 4 #define BRCMF_SKB_IF_FLAGS_INDEX_MASK 0x000f #define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT 0 @@ -246,7 +245,7 @@ struct brcmf_skbuff_cb { #define BRCMF_SKB_HTOD_TAG_HSLOT_MASK 0x00ffff00 #define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT 8 #define BRCMF_SKB_HTOD_TAG_FREERUN_MASK 0x000000ff -#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0 +#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0 #define brcmf_skb_htod_tag_set_field(skb, field, value) \ brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \ @@ -278,6 +277,7 @@ struct brcmf_skbuff_cb { /** * enum brcmf_fws_fifo - fifo indices used by dongle firmware. * + * @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background. * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic. * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic. * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic. @@ -287,7 +287,8 @@ struct brcmf_skbuff_cb { * @BRCMF_FWS_FIFO_COUNT: number of fifos. 
*/ enum brcmf_fws_fifo { - BRCMF_FWS_FIFO_AC_BK, + BRCMF_FWS_FIFO_FIRST, + BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST, BRCMF_FWS_FIFO_AC_BE, BRCMF_FWS_FIFO_AC_VI, BRCMF_FWS_FIFO_AC_VO, @@ -307,12 +308,15 @@ enum brcmf_fws_fifo { * firmware suppress the packet as device is already in PS mode. * @BRCMF_FWS_TXSTATUS_FW_TOSSED: * firmware tossed the packet. + * @BRCMF_FWS_TXSTATUS_HOST_TOSSED: + * host tossed the packet. */ enum brcmf_fws_txstatus { BRCMF_FWS_TXSTATUS_DISCARD, BRCMF_FWS_TXSTATUS_CORE_SUPPRESS, BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS, - BRCMF_FWS_TXSTATUS_FW_TOSSED + BRCMF_FWS_TXSTATUS_FW_TOSSED, + BRCMF_FWS_TXSTATUS_HOST_TOSSED }; enum brcmf_fws_fcmode { @@ -343,6 +347,7 @@ enum brcmf_fws_mac_desc_state { * @transit_count: packet in transit to firmware. */ struct brcmf_fws_mac_descriptor { + char name[16]; u8 occupied; u8 mac_handle; u8 interface_id; @@ -356,7 +361,6 @@ struct brcmf_fws_mac_descriptor { u8 seq[BRCMF_FWS_FIFO_COUNT]; struct pktq psq; int transit_count; - int suppress_count; int suppr_transit_count; bool send_tim_signal; u8 traffic_pending_bmp; @@ -383,12 +387,10 @@ enum brcmf_fws_hanger_item_state { * struct brcmf_fws_hanger_item - single entry for tx pending packet. * * @state: entry is either free or occupied. - * @gen: generation. * @pkt: packet itself. */ struct brcmf_fws_hanger_item { enum brcmf_fws_hanger_item_state state; - u8 gen; struct sk_buff *pkt; }; @@ -434,6 +436,8 @@ struct brcmf_fws_info { u32 fifo_credit_map; u32 fifo_delay_map; unsigned long borrow_defer_timestamp; + bool bus_flow_blocked; + bool creditmap_received; }; /* @@ -507,7 +511,6 @@ static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger) { int i; - brcmf_dbg(TRACE, "enter\n"); memset(hanger, 0, sizeof(*hanger)); for (i = 0; i < ARRAY_SIZE(hanger->items); i++) hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; @@ -517,7 +520,6 @@ static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h) { u32 i; - brcmf_dbg(TRACE, "enter\n"); i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS; while (i != h->slot_pos) { @@ -533,14 +535,12 @@ static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h) h->failed_slotfind++; i = BRCMF_FWS_HANGER_MAXITEMS; done: - brcmf_dbg(TRACE, "exit: %d\n", i); return i; } static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h, - struct sk_buff *pkt, u32 slot_id) + struct sk_buff *pkt, u32 slot_id) { - brcmf_dbg(TRACE, "enter\n"); if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) return -ENOENT; @@ -560,7 +560,6 @@ static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, u32 slot_id, struct sk_buff **pktout, bool remove_item) { - brcmf_dbg(TRACE, "enter\n"); if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) return -ENOENT; @@ -574,23 +573,18 @@ static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, if (remove_item) { h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; h->items[slot_id].pkt = NULL; - h->items[slot_id].gen = 0xff; h->popped++; } return 0; } static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h, - u32 slot_id, u8 gen) + u32 slot_id) { - brcmf_dbg(TRACE, "enter\n"); - if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) return -ENOENT; - h->items[slot_id].gen = gen; - - if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_INUSE) { + if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) { brcmf_err("entry not in use\n"); return -EINVAL; } @@ -599,25 +593,6 @@ static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h, return 0; } -static int 
brcmf_fws_hanger_get_genbit(struct brcmf_fws_hanger *hanger, - struct sk_buff *pkt, u32 slot_id, - int *gen) -{ - brcmf_dbg(TRACE, "enter\n"); - *gen = 0xff; - - if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) - return -ENOENT; - - if (hanger->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) { - brcmf_err("slot not in use\n"); - return -EINVAL; - } - - *gen = hanger->items[slot_id].gen; - return 0; -} - static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws, bool (*fn)(struct sk_buff *, void *), int ifidx) @@ -627,7 +602,6 @@ static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws, int i; enum brcmf_fws_hanger_item_state s; - brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx); for (i = 0; i < ARRAY_SIZE(h->items); i++) { s = h->items[i].state; if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE || @@ -644,6 +618,19 @@ static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws, } } +static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws, + struct brcmf_fws_mac_descriptor *desc) +{ + if (desc == &fws->desc.other) + strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name)); + else if (desc->mac_handle) + scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d", + desc->mac_handle, desc->interface_id); + else + scnprintf(desc->name, sizeof(desc->name), "MACIF:%d", + desc->interface_id); +} + static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc, u8 *addr, u8 ifidx) { @@ -652,6 +639,7 @@ static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc, desc->occupied = 1; desc->state = BRCMF_FWS_STATE_OPEN; desc->requested_credit = 0; + desc->requested_packet = 0; /* depending on use may need ifp->bssidx instead */ desc->interface_id = ifidx; desc->ac_bitmap = 0xff; /* update this when handling APSD */ @@ -667,6 +655,7 @@ void brcmf_fws_clear_mac_descriptor(struct brcmf_fws_mac_descriptor *desc) desc->occupied = 0; desc->state = BRCMF_FWS_STATE_CLOSE; desc->requested_credit = 0; + desc->requested_packet = 0; } static struct brcmf_fws_mac_descriptor * @@ -675,7 +664,6 @@ brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea) struct brcmf_fws_mac_descriptor *entry; int i; - brcmf_dbg(TRACE, "enter: ea=%pM\n", ea); if (ea == NULL) return ERR_PTR(-EINVAL); @@ -695,31 +683,23 @@ brcmf_fws_find_mac_desc(struct brcmf_fws_info *fws, struct brcmf_if *ifp, { struct brcmf_fws_mac_descriptor *entry = &fws->desc.other; bool multicast; - enum nl80211_iftype iftype; - - brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx); multicast = is_multicast_ether_addr(da); - iftype = brcmf_cfg80211_get_iftype(ifp); - /* Multicast destination and P2P clients get the interface entry. - * STA gets the interface entry if there is no exact match. For - * example, TDLS destinations have their own entry. + /* Multicast destination, STA and P2P clients get the interface entry. + * STA/GC gets the MAC entry for TDLS destinations; TDLS destinations + * have their own entry. 
*/ - entry = NULL; - if ((multicast || iftype == NL80211_IFTYPE_STATION || - iftype == NL80211_IFTYPE_P2P_CLIENT) && ifp->fws_desc) + if (multicast && ifp->fws_desc) { entry = ifp->fws_desc; - - if (entry != NULL && iftype != NL80211_IFTYPE_STATION) goto done; + } entry = brcmf_fws_mac_descriptor_lookup(fws, da); if (IS_ERR(entry)) - entry = &fws->desc.other; + entry = ifp->fws_desc; done: - brcmf_dbg(TRACE, "exit: entry=%p\n", entry); return entry; } @@ -752,11 +732,7 @@ static void brcmf_fws_mac_desc_cleanup(struct brcmf_fws_info *fws, struct brcmf_fws_mac_descriptor *entry, int ifidx) { - brcmf_dbg(TRACE, "enter: entry=(ea=%pM, ifid=%d), ifidx=%d\n", - entry->ea, entry->interface_id, ifidx); if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) { - brcmf_dbg(TRACE, "flush psq: ifidx=%d, qlen=%d\n", - ifidx, entry->psq.len); brcmf_fws_psq_flush(fws, &entry->psq, ifidx); entry->occupied = !!(entry->psq.len); } @@ -772,7 +748,6 @@ static void brcmf_fws_bus_txq_cleanup(struct brcmf_fws_info *fws, int prec; u32 hslot; - brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx); txq = brcmf_bus_gettxq(fws->drvr->bus_if); if (IS_ERR(txq)) { brcmf_dbg(TRACE, "no txq to clean up\n"); @@ -798,7 +773,6 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx) struct brcmf_fws_mac_descriptor *table; bool (*matchfn)(struct sk_buff *, void *) = NULL; - brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx); if (fws == NULL) return; @@ -815,44 +789,115 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx) brcmf_fws_hanger_cleanup(fws, matchfn, ifidx); } -static void brcmf_fws_tim_update(struct brcmf_fws_info *ctx, - struct brcmf_fws_mac_descriptor *entry, - int prec) +static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb) { - brcmf_dbg(TRACE, "enter: ea=%pM\n", entry->ea); - if (entry->state == BRCMF_FWS_STATE_CLOSE) { - /* check delayedQ and suppressQ in one call using bitmap */ - if (brcmu_pktq_mlen(&entry->psq, 3 << (prec * 2)) == 0) - entry->traffic_pending_bmp = - entry->traffic_pending_bmp & ~NBITVAL(prec); - else - entry->traffic_pending_bmp = - entry->traffic_pending_bmp | NBITVAL(prec); + struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; + u8 *wlh; + u16 data_offset = 0; + u8 fillers; + __le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod); + + brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u (%u), pkttag=0x%08X, hslot=%d\n", + entry->ea, entry->interface_id, + brcmf_skb_if_flags_get_field(skb, INDEX), + le32_to_cpu(pkttag), (le32_to_cpu(pkttag) >> 8) & 0xffff); + if (entry->send_tim_signal) + data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN; + + /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */ + data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN; + fillers = round_up(data_offset, 4) - data_offset; + data_offset += fillers; + + skb_push(skb, data_offset); + wlh = skb->data; + + wlh[0] = BRCMF_FWS_TYPE_PKTTAG; + wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN; + memcpy(&wlh[2], &pkttag, sizeof(pkttag)); + wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2; + + if (entry->send_tim_signal) { + entry->send_tim_signal = 0; + wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP; + wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN; + wlh[2] = entry->mac_handle; + wlh[3] = entry->traffic_pending_bmp; + brcmf_dbg(TRACE, "adding TIM info: %02X:%02X:%02X:%02X\n", + wlh[0], wlh[1], wlh[2], wlh[3]); + wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2; + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; } - /* request a TIM update to firmware at the next piggyback 
opportunity */ + if (fillers) + memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers); + + brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX), + data_offset >> 2, skb); + return 0; +} + +static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws, + struct brcmf_fws_mac_descriptor *entry, + int prec, bool send_immediately) +{ + struct sk_buff *skb; + struct brcmf_bus *bus; + struct brcmf_skbuff_cb *skcb; + s32 err; + u32 len; + + /* check delayedQ and suppressQ in one call using bitmap */ + if (brcmu_pktq_mlen(&entry->psq, 3 << (prec * 2)) == 0) + entry->traffic_pending_bmp &= ~NBITVAL(prec); + else + entry->traffic_pending_bmp |= NBITVAL(prec); + + entry->send_tim_signal = false; if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) entry->send_tim_signal = true; + if (send_immediately && entry->send_tim_signal && + entry->state == BRCMF_FWS_STATE_CLOSE) { + /* create a dummy packet and send it. The traffic */ + /* bitmap info will automatically be attached to that packet */ + len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 + + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 + + 4 + fws->drvr->hdrlen; + skb = brcmu_pkt_buf_get_skb(len); + if (skb == NULL) + return false; + skb_pull(skb, len); + skcb = brcmf_skbcb(skb); + skcb->mac = entry; + skcb->state = BRCMF_FWS_SKBSTATE_TIM; + bus = fws->drvr->bus_if; + err = brcmf_fws_hdrpush(fws, skb); + if (err == 0) + err = brcmf_bus_txdata(bus, skb); + if (err) + brcmu_pkt_buf_free_skb(skb); + return true; + } + return false; } static void brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq, u8 if_id) { - struct brcmf_if *ifp = fws->drvr->iflist[if_id]; + struct brcmf_if *ifp = fws->drvr->iflist[!if_id ? 0 : if_id + 1]; if (WARN_ON(!ifp)) return; - brcmf_dbg(TRACE, - "enter: bssidx=%d, ifidx=%d\n", ifp->bssidx, ifp->ifidx); - if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) && pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER) brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, false); if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) && - pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) + pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) { + fws->stats.fws_flow_block++; brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true); + } return; } @@ -876,8 +921,9 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) entry = &fws->desc.nodes[mac_handle & 0x1F]; if (type == BRCMF_FWS_TYPE_MACDESC_DEL) { - brcmf_dbg(TRACE, "deleting mac %pM idx %d\n", addr, ifidx); if (entry->occupied) { + brcmf_dbg(TRACE, "deleting %s mac %pM\n", + entry->name, addr); brcmf_fws_mac_desc_cleanup(fws, entry, -1); brcmf_fws_clear_mac_descriptor(entry); } else @@ -885,25 +931,28 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) return 0; } - brcmf_dbg(TRACE, - "add mac %pM handle %u idx %d\n", addr, mac_handle, ifidx); existing = brcmf_fws_mac_descriptor_lookup(fws, addr); if (IS_ERR(existing)) { if (!entry->occupied) { entry->mac_handle = mac_handle; brcmf_fws_init_mac_descriptor(entry, addr, ifidx); + brcmf_fws_macdesc_set_name(fws, entry); brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, BRCMF_FWS_PSQ_LEN); + brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr); } else { fws->stats.mac_update_failed++; } } else { if (entry != existing) { - brcmf_dbg(TRACE, "relocate mac\n"); + brcmf_dbg(TRACE, "copy mac %s\n", existing->name); memcpy(entry, existing, offsetof(struct brcmf_fws_mac_descriptor, psq)); entry->mac_handle = mac_handle; brcmf_fws_clear_mac_descriptor(existing); + 
brcmf_fws_macdesc_set_name(fws, entry); + brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name, + addr); } else { brcmf_dbg(TRACE, "use existing\n"); WARN_ON(entry->mac_handle != mac_handle); @@ -918,7 +967,6 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws, { struct brcmf_fws_mac_descriptor *entry; u8 mac_handle; - int i; mac_handle = data[0]; entry = &fws->desc.nodes[mac_handle & 0x1F]; @@ -926,16 +974,18 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws, fws->stats.mac_ps_update_failed++; return -ESRCH; } - - /* a state update should wipe old credits? */ + /* a state update should wipe old credits */ entry->requested_credit = 0; + entry->requested_packet = 0; if (type == BRCMF_FWS_TYPE_MAC_OPEN) { entry->state = BRCMF_FWS_STATE_OPEN; return BRCMF_FWS_RET_OK_SCHEDULE; } else { entry->state = BRCMF_FWS_STATE_CLOSE; - for (i = BRCMF_FWS_FIFO_AC_BE; i < NL80211_NUM_ACS; i++) - brcmf_fws_tim_update(fws, entry, i); + brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false); + brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false); + brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false); + brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true); } return BRCMF_FWS_RET_OK_NOSCHEDULE; } @@ -949,7 +999,6 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws, ifidx = data[0]; - brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx); if (ifidx >= BRCMF_MAX_IFS) { ret = -ERANGE; goto fail; @@ -961,6 +1010,8 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws, goto fail; } + brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type, + entry->name); switch (type) { case BRCMF_FWS_TYPE_INTERFACE_OPEN: entry->state = BRCMF_FWS_STATE_OPEN; @@ -991,6 +1042,9 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type, return -ESRCH; } + brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n", + brcmf_fws_get_tlv_name(type), type, entry->name, + data[0], data[2]); if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT) entry->requested_credit = data[0]; else @@ -1000,6 +1054,37 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type, return BRCMF_FWS_RET_OK_SCHEDULE; } +static void +brcmf_fws_macdesc_use_req_credit(struct brcmf_fws_mac_descriptor *entry, + struct sk_buff *skb) +{ + if (entry->requested_credit > 0) { + entry->requested_credit--; + brcmf_skb_if_flags_set_field(skb, REQUESTED, 1); + brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 1); + if (entry->state != BRCMF_FWS_STATE_CLOSE) + brcmf_err("requested credit set while mac not closed!\n"); + } else if (entry->requested_packet > 0) { + entry->requested_packet--; + brcmf_skb_if_flags_set_field(skb, REQUESTED, 1); + brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0); + if (entry->state != BRCMF_FWS_STATE_CLOSE) + brcmf_err("requested packet set while mac not closed!\n"); + } else { + brcmf_skb_if_flags_set_field(skb, REQUESTED, 0); + brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0); + } +} + +static void brcmf_fws_macdesc_return_req_credit(struct sk_buff *skb) +{ + struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; + + if ((brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) && + (entry->state == BRCMF_FWS_STATE_CLOSE)) + entry->requested_credit++; +} + static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, u8 fifo, u8 credits) { @@ -1010,6 +1095,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, if (!credits) return; + fws->fifo_credit_map |= 1 << fifo; + if ((fifo == 
BRCMF_FWS_FIFO_AC_BE) && (fws->credits_borrowed[0])) { for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0; @@ -1031,7 +1118,6 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, } } - fws->fifo_credit_map |= 1 << fifo; fws->fifo_credit[fifo] += credits; } @@ -1042,27 +1128,6 @@ static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws) queue_work(fws->fws_wq, &fws->fws_dequeue_work); } -static void brcmf_skb_pick_up_credit(struct brcmf_fws_info *fws, int fifo, - struct sk_buff *p) -{ - struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(p)->mac; - - if (brcmf_skbcb(p)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) { - if (fws->fcmode != BRCMF_FWS_FCMODE_IMPLIED_CREDIT) - return; - brcmf_fws_return_credits(fws, fifo, 1); - } else { - /* - * if this packet did not count against FIFO credit, it - * must have taken a requested_credit from the destination - * entry (for pspoll etc.) - */ - if (!brcmf_skb_if_flags_get_field(p, REQUESTED)) - entry->requested_credit++; - } - brcmf_fws_schedule_deq(fws); -} - static int brcmf_fws_enq(struct brcmf_fws_info *fws, enum brcmf_fws_skb_state state, int fifo, struct sk_buff *p) @@ -1078,7 +1143,7 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws, return -ENOENT; } - brcmf_dbg(TRACE, "enter: ea=%pM, qlen=%d\n", entry->ea, entry->psq.len); + brcmf_dbg(DATA, "enter: fifo %d skb %p\n", fifo, p); if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) { prec += 1; qfull_stat = &fws->stats.supprq_full_error; @@ -1095,14 +1160,12 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws, /* update the sk_buff state */ brcmf_skbcb(p)->state = state; - if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) - entry->suppress_count++; /* * A packet has been pushed so update traffic * availability bitmap, if applicable */ - brcmf_fws_tim_update(fws, entry, fifo); + brcmf_fws_tim_update(fws, entry, fifo, true); brcmf_fws_flow_control_check(fws, &entry->psq, brcmf_skb_if_flags_get_field(p, INDEX)); return 0; @@ -1113,7 +1176,6 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) struct brcmf_fws_mac_descriptor *table; struct brcmf_fws_mac_descriptor *entry; struct sk_buff *p; - int use_credit = 1; int num_nodes; int node_pos; int prec_out; @@ -1137,9 +1199,8 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out); if (p == NULL) { if (entry->suppressed) { - if (entry->suppr_transit_count > - entry->suppress_count) - return NULL; + if (entry->suppr_transit_count) + continue; entry->suppressed = false; p = brcmu_pktq_mdeq(&entry->psq, 1 << (fifo * 2), &prec_out); @@ -1148,26 +1209,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) if (p == NULL) continue; - /* did the packet come from suppress sub-queue? */ - if (entry->requested_credit > 0) { - entry->requested_credit--; - /* - * if the packet was pulled out while destination is in - * closed state but had a non-zero packets requested, - * then this should not count against the FIFO credit. - * That is due to the fact that the firmware will - * most likely hold onto this packet until a suitable - * time later to push it to the appropriate AC FIFO. 
- */ - if (entry->state == BRCMF_FWS_STATE_CLOSE) - use_credit = 0; - } else if (entry->requested_packet > 0) { - entry->requested_packet--; - brcmf_skb_if_flags_set_field(p, REQUESTED, 1); - if (entry->state == BRCMF_FWS_STATE_CLOSE) - use_credit = 0; - } - brcmf_skb_if_flags_set_field(p, CREDITCHECK, use_credit); + brcmf_fws_macdesc_use_req_credit(entry, p); /* move dequeue position to ensure fair round-robin */ fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes; @@ -1179,7 +1221,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) * A packet has been picked up, update traffic * availability bitmap, if applicable */ - brcmf_fws_tim_update(fws, entry, fifo); + brcmf_fws_tim_update(fws, entry, fifo, false); /* * decrement total enqueued fifo packets and @@ -1192,7 +1234,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) } p = NULL; done: - brcmf_dbg(TRACE, "exit: fifo %d skb %p\n", fifo, p); + brcmf_dbg(DATA, "exit: fifo %d skb %p\n", fifo, p); return p; } @@ -1202,22 +1244,26 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo, struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; u32 hslot; int ret; + u8 ifidx; hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); /* this packet was suppressed */ - if (!entry->suppressed || entry->generation != genbit) { + if (!entry->suppressed) { entry->suppressed = true; - entry->suppress_count = brcmu_pktq_mlen(&entry->psq, - 1 << (fifo * 2 + 1)); entry->suppr_transit_count = entry->transit_count; + brcmf_dbg(DATA, "suppress %s: transit %d\n", + entry->name, entry->transit_count); } entry->generation = genbit; - ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb); + ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); + if (ret == 0) + ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, + skb); if (ret != 0) { - /* suppress q is full, drop this packet */ + /* suppress q is full or hdrpull failed, drop this packet */ brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true); } else { @@ -1225,26 +1271,24 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo, * Mark suppressed to avoid a double free during * wlfc cleanup */ - brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot, - genbit); - entry->suppress_count++; + brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot); } return ret; } static int -brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, +brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, u32 genbit) { u32 fifo; int ret; bool remove_from_hanger = true; struct sk_buff *skb; + struct brcmf_skbuff_cb *skcb; struct brcmf_fws_mac_descriptor *entry = NULL; - brcmf_dbg(TRACE, "status: flags=0x%X, hslot=%d\n", - flags, hslot); + brcmf_dbg(DATA, "flags %d\n", flags); if (flags == BRCMF_FWS_TXSTATUS_DISCARD) fws->stats.txs_discard++; @@ -1256,6 +1300,8 @@ brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, remove_from_hanger = false; } else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED) fws->stats.txs_tossed++; + else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED) + fws->stats.txs_host_tossed++; else brcmf_err("unexpected txstatus\n"); @@ -1266,26 +1312,35 @@ brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, return ret; } - entry = brcmf_skbcb(skb)->mac; + skcb = brcmf_skbcb(skb); + entry = skcb->mac; if (WARN_ON(!entry)) { brcmu_pkt_buf_free_skb(skb); return -EINVAL; } + entry->transit_count--; + if 
(entry->suppressed && entry->suppr_transit_count) + entry->suppr_transit_count--; + + brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags, + skcb->htod); /* pick up the implicit credit from this packet */ fifo = brcmf_skb_htod_tag_get_field(skb, FIFO); - brcmf_skb_pick_up_credit(fws, fifo, skb); + if ((fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT) || + (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) || + (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)) { + brcmf_fws_return_credits(fws, fifo, 1); + brcmf_fws_schedule_deq(fws); + } + brcmf_fws_macdesc_return_req_credit(skb); if (!remove_from_hanger) ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit); - if (remove_from_hanger || ret) { - entry->transit_count--; - if (entry->suppressed) - entry->suppr_transit_count--; - + if (remove_from_hanger || ret) brcmf_txfinalize(fws->drvr, skb, true); - } + return 0; } @@ -1299,11 +1354,11 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws, return BRCMF_FWS_RET_OK_NOSCHEDULE; } - brcmf_dbg(TRACE, "enter: data %pM\n", data); + brcmf_dbg(DATA, "enter: data %pM\n", data); for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++) brcmf_fws_return_credits(fws, i, data[i]); - brcmf_dbg(INFO, "map: credit %x delay %x\n", fws->fifo_credit_map, + brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map, fws->fifo_delay_map); return BRCMF_FWS_RET_OK_SCHEDULE; } @@ -1323,7 +1378,7 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data) hslot = brcmf_txstatus_get_field(status, HSLOT); genbit = brcmf_txstatus_get_field(status, GENERATION); - return brcmf_fws_txstatus_process(fws, flags, hslot, genbit); + return brcmf_fws_txs_process(fws, flags, hslot, genbit); } static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data) @@ -1331,7 +1386,7 @@ { __le32 timestamp; memcpy(&timestamp, &data[2], sizeof(timestamp)); - brcmf_dbg(INFO, "received: seq %d, timestamp %d\n", data[1], + brcmf_dbg(CTL, "received: seq %d, timestamp %d\n", data[1], le32_to_cpu(timestamp)); return 0; } @@ -1364,6 +1419,10 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp, brcmf_err("event payload too small (%d)\n", e->datalen); return -EINVAL; } + if (fws->creditmap_received) + return 0; + + fws->creditmap_received = true; brcmf_dbg(TRACE, "enter: credits %pM\n", credits); brcmf_fws_lock(ifp->drvr, flags); @@ -1392,7 +1451,7 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, s32 status; s32 err; - brcmf_dbg(TRACE, "enter: ifidx %d, skblen %u, sig %d\n", + brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n", ifidx, skb->len, signal_len); WARN_ON(signal_len > skb->len); @@ -1426,14 +1485,15 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, len = signal_data[1]; data = signal_data + 2; - brcmf_dbg(INFO, "tlv type=%d (%s), len=%d, data[0]=%d\n", type, - brcmf_fws_get_tlv_name(type), len, *data); + brcmf_dbg(HDRS, "tlv type=%s (%d), len=%d (%d)\n", + brcmf_fws_get_tlv_name(type), type, len, + brcmf_fws_get_tlv_len(fws, type)); /* abort parsing when length invalid */ if (data_len < len + 2) break; - if (len != brcmf_fws_get_tlv_len(fws, type)) + if (len < brcmf_fws_get_tlv_len(fws, type)) break; err = BRCMF_FWS_RET_OK_NOSCHEDULE; @@ -1502,64 +1562,32 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, return 0; } -static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb) -{ - struct 
brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; - u8 *wlh; - u16 data_offset = 0; - u8 fillers; - __le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod); - - brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u, pkttag=0x%08X\n", - entry->ea, entry->interface_id, le32_to_cpu(pkttag)); - if (entry->send_tim_signal) - data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN; - - /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */ - data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN; - fillers = round_up(data_offset, 4) - data_offset; - data_offset += fillers; - - skb_push(skb, data_offset); - wlh = skb->data; - - wlh[0] = BRCMF_FWS_TYPE_PKTTAG; - wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN; - memcpy(&wlh[2], &pkttag, sizeof(pkttag)); - wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2; - - if (entry->send_tim_signal) { - entry->send_tim_signal = 0; - wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP; - wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN; - wlh[2] = entry->mac_handle; - wlh[3] = entry->traffic_pending_bmp; - wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2; - entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; - } - if (fillers) - memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers); - - brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX), - data_offset >> 2, skb); - return 0; -} - static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo, struct sk_buff *p) { struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p); struct brcmf_fws_mac_descriptor *entry = skcb->mac; int rc = 0; - bool header_needed; + bool first_time; int hslot = BRCMF_FWS_HANGER_MAXITEMS; u8 free_ctr; - u8 ifidx; u8 flags; - header_needed = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED; + first_time = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED; - if (header_needed) { + brcmf_skb_if_flags_set_field(p, TRANSMIT, 1); + brcmf_skb_htod_tag_set_field(p, FIFO, fifo); + brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation); + flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST; + if (brcmf_skb_if_flags_get_field(p, REQUESTED)) { + /* + * Indicate that this packet is being sent in response to an + * explicit request from the firmware side. + */ + flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED; + } + brcmf_skb_htod_tag_set_field(p, FLAGS, flags); + if (first_time) { /* obtaining free slot may fail, but that will be caught * by the hanger push. This assures the packet has a BDC * header upon return. @@ -1568,47 +1596,20 @@ static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo, free_ctr = entry->seq[fifo]; brcmf_skb_htod_tag_set_field(p, HSLOT, hslot); brcmf_skb_htod_tag_set_field(p, FREERUN, free_ctr); - brcmf_skb_htod_tag_set_field(p, GENERATION, 1); - entry->transit_count++; - } - brcmf_skb_if_flags_set_field(p, TRANSMIT, 1); - brcmf_skb_htod_tag_set_field(p, FIFO, fifo); - - flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST; - if (!(skcb->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)) { - /* - Indicate that this packet is being sent in response to an - explicit request from the firmware side. 
- */ - flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED; - } - brcmf_skb_htod_tag_set_field(p, FLAGS, flags); - if (header_needed) { - brcmf_fws_hdrpush(fws, p); rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot); if (rc) brcmf_err("hanger push failed: rc=%d\n", rc); - } else { - int gen; - - /* remove old header */ - rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, p); - if (rc == 0) { - hslot = brcmf_skb_htod_tag_get_field(p, HSLOT); - brcmf_fws_hanger_get_genbit(&fws->hanger, p, - hslot, &gen); - brcmf_skb_htod_tag_set_field(p, GENERATION, gen); - - /* push new header */ - brcmf_fws_hdrpush(fws, p); - } } + if (rc == 0) + brcmf_fws_hdrpush(fws, p); + return rc; } static void -brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb) +brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, + struct sk_buff *skb, int fifo) { /* put the packet back to the head of queue @@ -1622,13 +1623,11 @@ brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb) enum brcmf_fws_skb_state state; struct sk_buff *pktout; int rc = 0; - int fifo; int hslot; - u8 ifidx; - fifo = brcmf_skb_if_flags_get_field(skb, FIFO); state = brcmf_skbcb(skb)->state; entry = brcmf_skbcb(skb)->mac; + hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); if (entry != NULL) { if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) { @@ -1640,19 +1639,6 @@ brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb) rc = -ENOSPC; } } else { - hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); - - /* remove header first */ - rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); - if (rc) { - brcmf_err("header removal failed\n"); - /* free the hanger slot */ - brcmf_fws_hanger_poppkt(&fws->hanger, hslot, - &pktout, true); - rc = -EINVAL; - goto fail; - } - /* delay-q packets are going to delay-q */ pktout = brcmu_pktq_penq_head(&entry->psq, 2 * fifo, skb); @@ -1668,33 +1654,30 @@ brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb) /* decrement sequence count */ entry->seq[fifo]--; } - /* - if this packet did not count against FIFO credit, it must have - taken a requested_credit from the firmware (for pspoll etc.) 
- */ - if (!(brcmf_skbcb(skb)->if_flags & - BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)) - entry->requested_credit++; } else { brcmf_err("no mac entry linked\n"); rc = -ENOENT; } - -fail: if (rc) { - brcmf_txfinalize(fws->drvr, skb, false); fws->stats.rollback_failed++; - } else + brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, + hslot, 0); + } else { fws->stats.rollback_success++; + brcmf_fws_return_credits(fws, fifo, 1); + brcmf_fws_macdesc_return_req_credit(skb); + } } static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws) { int lender_ac; - if (time_after(fws->borrow_defer_timestamp, jiffies)) + if (time_after(fws->borrow_defer_timestamp, jiffies)) { + fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE); return -ENAVAIL; + } for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) { if (fws->fifo_credit[lender_ac]) { @@ -1702,10 +1685,12 @@ static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws) fws->fifo_credit[lender_ac]--; if (fws->fifo_credit[lender_ac] == 0) fws->fifo_credit_map &= ~(1 << lender_ac); - brcmf_dbg(TRACE, "borrow credit from: %d\n", lender_ac); + fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE); + brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac); return 0; } } + fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE); return -ENAVAIL; } @@ -1714,33 +1699,6 @@ static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo, { struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; int *credit = &fws->fifo_credit[fifo]; - int use_credit = 1; - - brcmf_dbg(TRACE, "enter: ac=%d, credits=%d\n", fifo, *credit); - - if (entry->requested_credit > 0) { - /* - * if the packet was pulled out while destination is in - * closed state but had a non-zero packets requested, - * then this should not count against the FIFO credit. - * That is due to the fact that the firmware will - * most likely hold onto this packet until a suitable - * time later to push it to the appropriate AC FIFO. 
- */ - entry->requested_credit--; - if (entry->state == BRCMF_FWS_STATE_CLOSE) - use_credit = 0; - } else if (entry->requested_packet > 0) { - entry->requested_packet--; - brcmf_skb_if_flags_set_field(skb, REQUESTED, 1); - if (entry->state == BRCMF_FWS_STATE_CLOSE) - use_credit = 0; - } - brcmf_skb_if_flags_set_field(skb, CREDITCHECK, use_credit); - if (!use_credit) { - brcmf_dbg(TRACE, "exit: no creditcheck set\n"); - return 0; - } if (fifo != BRCMF_FWS_FIFO_AC_BE) fws->borrow_defer_timestamp = jiffies + @@ -1748,17 +1706,22 @@ static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo, if (!(*credit)) { /* Try to borrow a credit from other queue */ - if (fifo == BRCMF_FWS_FIFO_AC_BE && - brcmf_fws_borrow_credit(fws) == 0) - return 0; - - brcmf_dbg(TRACE, "exit: ac=%d, credits depleted\n", fifo); - return -ENAVAIL; + if (fifo != BRCMF_FWS_FIFO_AC_BE || + (brcmf_fws_borrow_credit(fws) != 0)) { + brcmf_dbg(DATA, "ac=%d, credits depleted\n", fifo); + return -ENAVAIL; + } + } else { + (*credit)--; + if (!(*credit)) + fws->fifo_credit_map &= ~(1 << fifo); } - (*credit)--; - if (!(*credit)) - fws->fifo_credit_map &= ~(1 << fifo); - brcmf_dbg(TRACE, "exit: ac=%d, credits=%d\n", fifo, *credit); + + brcmf_fws_macdesc_use_req_credit(entry, skb); + + brcmf_dbg(DATA, "ac=%d, credits=%02d:%02d:%02d:%02d\n", fifo, + fws->fifo_credit[0], fws->fifo_credit[1], + fws->fifo_credit[2], fws->fifo_credit[3]); return 0; } @@ -1769,6 +1732,7 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo, struct brcmf_fws_mac_descriptor *entry; struct brcmf_bus *bus = fws->drvr->bus_if; int rc; + u8 ifidx; entry = skcb->mac; if (IS_ERR(entry)) @@ -1780,21 +1744,27 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo, goto rollback; } + brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags, + skcb->htod); rc = brcmf_bus_txdata(bus, skb); - if (rc < 0) + if (rc < 0) { + brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); goto rollback; + } + entry->transit_count++; + if (entry->suppressed) + entry->suppr_transit_count++; entry->seq[fifo]++; fws->stats.pkt2bus++; - if (brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) { - fws->stats.send_pkts[fifo]++; - fws->stats.fifo_credits_sent[fifo]++; - } + fws->stats.send_pkts[fifo]++; + if (brcmf_skb_if_flags_get_field(skb, REQUESTED)) + fws->stats.requested_sent[fifo]++; return rc; rollback: - brcmf_fws_rollback_toq(fws, skb); + brcmf_fws_rollback_toq(fws, skb, fifo); return rc; } @@ -1831,13 +1801,13 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx); if (!multicast) fifo = brcmf_fws_prio2fifo[skb->priority]; - brcmf_skb_if_flags_set_field(skb, FIFO, fifo); - brcmf_dbg(TRACE, "ea=%pM, multi=%d, fifo=%d\n", eh->h_dest, - multicast, fifo); + brcmf_dbg(DATA, "%s mac %pM multi %d fifo %d\n", skcb->mac->name, + eh->h_dest, multicast, fifo); brcmf_fws_lock(drvr, flags); if (skcb->mac->suppressed || + fws->bus_flow_blocked || brcmf_fws_mac_desc_closed(fws, skcb->mac, fifo) || brcmu_pktq_mlen(&skcb->mac->psq, 3 << (fifo * 2)) || (!multicast && @@ -1846,9 +1816,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) drvr->fws->fifo_delay_map |= 1 << fifo; brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb); } else { - if (brcmf_fws_commit_skb(fws, fifo, skb)) - if (!multicast) - brcmf_skb_pick_up_credit(fws, fifo, skb); + brcmf_fws_commit_skb(fws, fifo, skb); } brcmf_fws_unlock(drvr, flags); return 0; @@ -1870,16 
+1838,16 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp) struct brcmf_fws_info *fws = ifp->drvr->fws; struct brcmf_fws_mac_descriptor *entry; - brcmf_dbg(TRACE, "enter: idx=%d, mac=%pM\n", - ifp->bssidx, ifp->mac_addr); if (!ifp->ndev || !ifp->drvr->fw_signals) return; entry = &fws->desc.iface[ifp->ifidx]; ifp->fws_desc = entry; brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx); + brcmf_fws_macdesc_set_name(fws, entry); brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, BRCMF_FWS_PSQ_LEN); + brcmf_dbg(TRACE, "added %s\n", entry->name); } void brcmf_fws_del_interface(struct brcmf_if *ifp) @@ -1887,12 +1855,12 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp) struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; ulong flags; - brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx); if (!entry) return; brcmf_fws_lock(ifp->drvr, flags); ifp->fws_desc = NULL; + brcmf_dbg(TRACE, "deleting %s\n", entry->name); brcmf_fws_clear_mac_descriptor(entry); brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx); brcmf_fws_unlock(ifp->drvr, flags); @@ -1904,39 +1872,36 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) struct sk_buff *skb; ulong flags; int fifo; - int credit; fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work); - brcmf_dbg(TRACE, "enter: fws=%p\n", fws); brcmf_fws_lock(fws->drvr, flags); - for (fifo = NL80211_NUM_ACS; fifo >= 0; fifo--) { - brcmf_dbg(TRACE, "fifo %d credit %d\n", fifo, - fws->fifo_credit[fifo]); - for (credit = 0; credit < fws->fifo_credit[fifo]; /* nop */) { + for (fifo = NL80211_NUM_ACS; fifo >= 0 && !fws->bus_flow_blocked; + fifo--) { + while (fws->fifo_credit[fifo]) { skb = brcmf_fws_deq(fws, fifo); - if (!skb || brcmf_fws_commit_skb(fws, fifo, skb)) + if (!skb) + break; + fws->fifo_credit[fifo]--; + if (brcmf_fws_commit_skb(fws, fifo, skb)) + break; + if (fws->bus_flow_blocked) break; - if (brcmf_skbcb(skb)->if_flags & - BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) - credit++; } if ((fifo == BRCMF_FWS_FIFO_AC_BE) && - (credit == fws->fifo_credit[fifo])) { - fws->fifo_credit[fifo] -= credit; + (fws->fifo_credit[fifo] == 0) && + (!fws->bus_flow_blocked)) { while (brcmf_fws_borrow_credit(fws) == 0) { skb = brcmf_fws_deq(fws, fifo); if (!skb) { brcmf_fws_return_credits(fws, fifo, 1); break; } - if (brcmf_fws_commit_skb(fws, fifo, skb)) { - brcmf_fws_return_credits(fws, fifo, 1); + if (brcmf_fws_commit_skb(fws, fifo, skb)) + break; + if (fws->bus_flow_blocked) break; - } } - } else { - fws->fifo_credit[fifo] -= credit; } } brcmf_fws_unlock(fws->drvr, flags); @@ -1994,14 +1959,14 @@ int brcmf_fws_init(struct brcmf_pub *drvr) brcmf_fws_hanger_init(&drvr->fws->hanger); brcmf_fws_init_mac_descriptor(&drvr->fws->desc.other, NULL, 0); + brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other); brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT, BRCMF_FWS_PSQ_LEN); /* create debugfs file for statistics */ brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats); - /* TODO: remove upon feature delivery */ - brcmf_err("%s bdcv2 tlv signaling [%x]\n", + brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n", drvr->fw_signals ? 
"enabled" : "disabled", tlv); return 0; @@ -2043,25 +2008,31 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws) if (!fws) return false; - brcmf_dbg(TRACE, "enter: mode=%d\n", fws->fcmode); return fws->fcmode != BRCMF_FWS_FCMODE_NONE; } void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb) { ulong flags; + u32 hslot; - brcmf_fws_lock(fws->drvr, flags); - brcmf_fws_txstatus_process(fws, BRCMF_FWS_TXSTATUS_FW_TOSSED, - brcmf_skb_htod_tag_get_field(skb, HSLOT), 0); - /* the packet never reached firmware so reclaim credit */ - if (fws->fcmode == BRCMF_FWS_FCMODE_EXPLICIT_CREDIT && - brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) { - brcmf_fws_return_credits(fws, - brcmf_skb_htod_tag_get_field(skb, - FIFO), - 1); - brcmf_fws_schedule_deq(fws); + if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) { + brcmu_pkt_buf_free_skb(skb); + return; } + brcmf_fws_lock(fws->drvr, flags); + hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); + brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0); brcmf_fws_unlock(fws->drvr, flags); } + +void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked) +{ + struct brcmf_fws_info *fws = drvr->fws; + + fws->bus_flow_blocked = flow_blocked; + if (!flow_blocked) + brcmf_fws_schedule_deq(fws); + else + fws->stats.bus_flow_block++; +} diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h index fbe483d23752..9fc860910bd8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h @@ -29,5 +29,6 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp); void brcmf_fws_add_interface(struct brcmf_if *ifp); void brcmf_fws_del_interface(struct brcmf_if *ifp); void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb); +void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked); #endif /* FWSIGNAL_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h index 9df1f7a681e0..bc2917112899 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h @@ -87,6 +87,27 @@ TRACE_EVENT(brcmf_hexdump, TP_printk("hexdump [length=%lu]", __entry->len) ); +TRACE_EVENT(brcmf_bdchdr, + TP_PROTO(void *data), + TP_ARGS(data), + TP_STRUCT__entry( + __field(u8, flags) + __field(u8, prio) + __field(u8, flags2) + __field(u32, siglen) + __dynamic_array(u8, signal, *((u8 *)data + 3) * 4) + ), + TP_fast_assign( + __entry->flags = *(u8 *)data; + __entry->prio = *((u8 *)data + 1); + __entry->flags2 = *((u8 *)data + 2); + __entry->siglen = *((u8 *)data + 3) * 4; + memcpy(__get_dynamic_array(signal), + (u8 *)data + 4, __entry->siglen); + ), + TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen) +); + #ifdef CONFIG_BRCM_TRACING #undef TRACE_INCLUDE_PATH diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index 01aed7ad6bec..322cadc51ded 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -82,6 +82,7 @@ struct brcmf_usbdev_info { int tx_high_watermark; int tx_freecount; bool tx_flowblock; + spinlock_t tx_flowblock_lock; struct brcmf_usbreq *tx_reqs; struct brcmf_usbreq *rx_reqs; @@ -411,6 +412,7 @@ static void brcmf_usb_tx_complete(struct urb *urb) { struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context; struct 
brcmf_usbdev_info *devinfo = req->devinfo; + unsigned long flags; brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status, req->skb); @@ -419,11 +421,13 @@ static void brcmf_usb_tx_complete(struct urb *urb) brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0); req->skb = NULL; brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount); + spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags); if (devinfo->tx_freecount > devinfo->tx_high_watermark && devinfo->tx_flowblock) { brcmf_txflowblock(devinfo->dev, false); devinfo->tx_flowblock = false; } + spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags); } static void brcmf_usb_rx_complete(struct urb *urb) @@ -568,6 +572,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); struct brcmf_usbreq *req; int ret; + unsigned long flags; brcmf_dbg(USB, "Enter, skb=%p\n", skb); if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) { @@ -599,11 +604,13 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) goto fail; } + spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags); if (devinfo->tx_freecount < devinfo->tx_low_watermark && !devinfo->tx_flowblock) { brcmf_txflowblock(dev, true); devinfo->tx_flowblock = true; } + spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags); return 0; fail: @@ -1164,6 +1171,7 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo, /* Initialize the spinlocks */ spin_lock_init(&devinfo->qlock); + spin_lock_init(&devinfo->tx_flowblock_lock); INIT_LIST_HEAD(&devinfo->rx_freeq); INIT_LIST_HEAD(&devinfo->rx_postq); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 301e572e8923..71f4db5fde99 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -3982,6 +3982,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct brcmf_fil_af_params_le *af_params; bool ack; s32 chan_nr; + u32 freq; brcmf_dbg(TRACE, "Enter\n"); @@ -3994,6 +3995,8 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, return -EPERM; } + vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); + if (ieee80211_is_probe_resp(mgmt->frame_control)) { /* Right now the only reason to get a probe response */ /* is for p2p listen response or for p2p GO from */ @@ -4009,7 +4012,6 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; ie_len = len - ie_offset; - vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; err = brcmf_vif_set_mgmt_ie(vif, @@ -4033,8 +4035,15 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN); /* Add the length, excluding the 802.11 header */ action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN); - /* Add the channel */ - chan_nr = ieee80211_frequency_to_channel(chan->center_freq); + /* Add the channel.
Use the one specified as a parameter, if any, or + * the current one (obtained from the firmware) otherwise + */ + if (chan) + freq = chan->center_freq; + else + brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL, + &freq); + chan_nr = ieee80211_frequency_to_channel(freq); af_params->channel = cpu_to_le32(chan_nr); memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c index 1585cc5bf866..bd982856d385 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c @@ -900,7 +900,7 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, if (supr_status) { update_rate = false; if (supr_status == TX_STATUS_SUPR_BADCH) { - brcms_err(wlc->hw->d11core, + brcms_dbg_ht(wlc->hw->d11core, "%s: Pkt tx suppressed, illegal channel possibly %d\n", __func__, CHSPEC_CHANNEL( wlc->default_bss->chanspec)); diff --git a/drivers/net/wireless/cw1200/Kconfig b/drivers/net/wireless/cw1200/Kconfig new file mode 100644 index 000000000000..0880742eab17 --- /dev/null +++ b/drivers/net/wireless/cw1200/Kconfig @@ -0,0 +1,30 @@ +config CW1200 + tristate "CW1200 WLAN support" + depends on MAC80211 && CFG80211 + help + This is a driver for the ST-E CW1100 & CW1200 WLAN chipsets. + This option just enables the driver core; see below for + specific bus support. + +if CW1200 + +config CW1200_WLAN_SDIO + tristate "Support SDIO platforms" + depends on CW1200 && MMC + help + Enable support for the CW1200 connected via an SDIO bus. + By default this driver only supports the Sagrad SG901-1091/1098 EVK + and similar designs that utilize a hardware reset circuit. To + support different CW1200 SDIO designs you will need to override + the default platform data by calling cw1200_sdio_set_platform_data() + in your board setup file. + +config CW1200_WLAN_SPI + tristate "Support SPI platforms" + depends on CW1200 && SPI + help + Enables support for the CW1200 connected via an SPI bus. You will + need to add appropriate platform data glue in your board setup + file. + +endif diff --git a/drivers/net/wireless/cw1200/Makefile b/drivers/net/wireless/cw1200/Makefile new file mode 100644 index 000000000000..b086aac6547a --- /dev/null +++ b/drivers/net/wireless/cw1200/Makefile @@ -0,0 +1,21 @@ +cw1200_core-y := \ + fwio.o \ + txrx.o \ + main.o \ + queue.o \ + hwio.o \ + bh.o \ + wsm.o \ + sta.o \ + scan.o \ + debug.o +cw1200_core-$(CONFIG_PM) += pm.o + +# CFLAGS_sta.o += -DDEBUG + +cw1200_wlan_sdio-y := cw1200_sdio.o +cw1200_wlan_spi-y := cw1200_spi.o + +obj-$(CONFIG_CW1200) += cw1200_core.o +obj-$(CONFIG_CW1200_WLAN_SDIO) += cw1200_wlan_sdio.o +obj-$(CONFIG_CW1200_WLAN_SPI) += cw1200_wlan_spi.o diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/cw1200/bh.c new file mode 100644 index 000000000000..c1ec2a4dd8c0 --- /dev/null +++ b/drivers/net/wireless/cw1200/bh.c @@ -0,0 +1,619 @@ +/* + * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on: + * ST-Ericsson UMAC CW1200 driver, which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh <ajitpal.singh@stericsson.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation.
+ */ + +#include <linux/module.h> +#include <net/mac80211.h> +#include <linux/kthread.h> +#include <linux/timer.h> + +#include "cw1200.h" +#include "bh.h" +#include "hwio.h" +#include "wsm.h" +#include "hwbus.h" +#include "debug.h" +#include "fwio.h" + +static int cw1200_bh(void *arg); + +#define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4) +/* an SPI message cannot be bigger than (2"12-1)*2 bytes + * "*2" to cvt to bytes + */ +#define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2) +#define PIGGYBACK_CTRL_REG (2) +#define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG) + +/* Suspend state privates */ +enum cw1200_bh_pm_state { + CW1200_BH_RESUMED = 0, + CW1200_BH_SUSPEND, + CW1200_BH_SUSPENDED, + CW1200_BH_RESUME, +}; + +typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv, + u8 *data, size_t size); + +static void cw1200_bh_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, bh_work); + cw1200_bh(priv); +} + +int cw1200_register_bh(struct cw1200_common *priv) +{ + int err = 0; + /* Realtime workqueue */ + priv->bh_workqueue = alloc_workqueue("cw1200_bh", + WQ_MEM_RECLAIM | WQ_HIGHPRI + | WQ_CPU_INTENSIVE, 1); + + if (!priv->bh_workqueue) + return -ENOMEM; + + INIT_WORK(&priv->bh_work, cw1200_bh_work); + + pr_debug("[BH] register.\n"); + + atomic_set(&priv->bh_rx, 0); + atomic_set(&priv->bh_tx, 0); + atomic_set(&priv->bh_term, 0); + atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); + priv->bh_error = 0; + priv->hw_bufs_used = 0; + priv->buf_id_tx = 0; + priv->buf_id_rx = 0; + init_waitqueue_head(&priv->bh_wq); + init_waitqueue_head(&priv->bh_evt_wq); + + err = !queue_work(priv->bh_workqueue, &priv->bh_work); + WARN_ON(err); + return err; +} + +void cw1200_unregister_bh(struct cw1200_common *priv) +{ + atomic_add(1, &priv->bh_term); + wake_up(&priv->bh_wq); + + flush_workqueue(priv->bh_workqueue); + + destroy_workqueue(priv->bh_workqueue); + priv->bh_workqueue = NULL; + + pr_debug("[BH] unregistered.\n"); +} + +void cw1200_irq_handler(struct cw1200_common *priv) +{ + pr_debug("[BH] irq.\n"); + + /* Disable Interrupts! */ + /* NOTE: hwbus_ops->lock already held */ + __cw1200_irq_enable(priv, 0); + + if (/* WARN_ON */(priv->bh_error)) + return; + + if (atomic_add_return(1, &priv->bh_rx) == 1) + wake_up(&priv->bh_wq); +} +EXPORT_SYMBOL_GPL(cw1200_irq_handler); + +void cw1200_bh_wakeup(struct cw1200_common *priv) +{ + pr_debug("[BH] wakeup.\n"); + if (priv->bh_error) { + pr_err("[BH] wakeup failed (BH error)\n"); + return; + } + + if (atomic_add_return(1, &priv->bh_tx) == 1) + wake_up(&priv->bh_wq); +} + +int cw1200_bh_suspend(struct cw1200_common *priv) +{ + pr_debug("[BH] suspend.\n"); + if (priv->bh_error) { + wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n"); + return -EINVAL; + } + + atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND); + wake_up(&priv->bh_wq); + return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || + (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)), + 1 * HZ) ? 0 : -ETIMEDOUT; +} + +int cw1200_bh_resume(struct cw1200_common *priv) +{ + pr_debug("[BH] resume.\n"); + if (priv->bh_error) { + wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n"); + return -EINVAL; + } + + atomic_set(&priv->bh_suspend, CW1200_BH_RESUME); + wake_up(&priv->bh_wq); + return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || + (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)), + 1 * HZ) ? 
0 : -ETIMEDOUT; +} + +static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv) +{ + ++priv->hw_bufs_used; +} + +int wsm_release_tx_buffer(struct cw1200_common *priv, int count) +{ + int ret = 0; + int hw_bufs_used = priv->hw_bufs_used; + + priv->hw_bufs_used -= count; + if (WARN_ON(priv->hw_bufs_used < 0)) + ret = -1; + else if (hw_bufs_used >= priv->wsm_caps.input_buffers) + ret = 1; + if (!priv->hw_bufs_used) + wake_up(&priv->bh_evt_wq); + return ret; +} + +static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv, + u16 *ctrl_reg) +{ + int ret; + + ret = cw1200_reg_read_16(priv, + ST90TDS_CONTROL_REG_ID, ctrl_reg); + if (ret) { + ret = cw1200_reg_read_16(priv, + ST90TDS_CONTROL_REG_ID, ctrl_reg); + if (ret) + pr_err("[BH] Failed to read control register.\n"); + } + + return ret; +} + +static int cw1200_device_wakeup(struct cw1200_common *priv) +{ + u16 ctrl_reg; + int ret; + + pr_debug("[BH] Device wakeup.\n"); + + /* First, set the dpll register */ + ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, + cw1200_dpll_from_clk(priv->hw_refclk)); + if (WARN_ON(ret)) + return ret; + + /* To force the device to be always-on, the host sets WLAN_UP to 1 */ + ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, + ST90TDS_CONT_WUP_BIT); + if (WARN_ON(ret)) + return ret; + + ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg); + if (WARN_ON(ret)) + return ret; + + /* If the device returns WLAN_RDY as 1, the device is active and will + * remain active. + */ + if (ctrl_reg & ST90TDS_CONT_RDY_BIT) { + pr_debug("[BH] Device awake.\n"); + return 1; + } + + return 0; +} + +/* Must be called from the BH thread. */ +void cw1200_enable_powersave(struct cw1200_common *priv, + bool enable) +{ + pr_debug("[BH] Powersave is %s.\n", + enable ? "enabled" : "disabled"); + priv->powersave_enabled = enable; +} + +static int cw1200_bh_rx_helper(struct cw1200_common *priv, + uint16_t *ctrl_reg, + int *tx) +{ + size_t read_len = 0; + struct sk_buff *skb_rx = NULL; + struct wsm_hdr *wsm; + size_t wsm_len; + u16 wsm_id; + u8 wsm_seq; + int rx_resync = 1; + + size_t alloc_len; + u8 *data; + + read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2; + if (!read_len) + return 0; /* No more work */ + + if (WARN_ON((read_len < sizeof(struct wsm_hdr)) || + (read_len > EFFECTIVE_BUF_SIZE))) { + pr_debug("Invalid read len: %zu (%04x)", + read_len, *ctrl_reg); + goto err; + } + + /* Add SIZE of PIGGYBACK reg (CONTROL Reg) + * to the NEXT Message length + 2 Bytes for SKB + */ + read_len = read_len + 2; + + alloc_len = priv->hwbus_ops->align_size( + priv->hwbus_priv, read_len); + + /* Check if not exceeding CW1200 capabilities */ + if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) { + pr_debug("Read aligned len: %zu\n", + alloc_len); + } + + skb_rx = dev_alloc_skb(alloc_len); + if (WARN_ON(!skb_rx)) + goto err; + + skb_trim(skb_rx, 0); + skb_put(skb_rx, read_len); + data = skb_rx->data; + if (WARN_ON(!data)) + goto err; + + if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) { + pr_err("rx blew up, len %zu\n", alloc_len); + goto err; + } + + /* Piggyback */ + *ctrl_reg = __le16_to_cpu( + ((__le16 *)data)[alloc_len / 2 - 1]); + + wsm = (struct wsm_hdr *)data; + wsm_len = __le16_to_cpu(wsm->len); + if (WARN_ON(wsm_len > read_len)) + goto err; + + if (priv->wsm_enable_wsm_dumps) + print_hex_dump_bytes("<-- ", + DUMP_PREFIX_NONE, + data, wsm_len); + + wsm_id = __le16_to_cpu(wsm->id) & 0xFFF; + wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7; + + skb_trim(skb_rx, wsm_len); + + if (wsm_id == 0x0800) {
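+		/* A WSM id of 0x0800 is an exception indication from the
+		 * firmware (hedged note by the editor, inferred from the
+		 * handler below): dump it via wsm_handle_exception() and
+		 * treat this read as failed.
+		 */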
wsm_handle_exception(priv, + &data[sizeof(*wsm)], + wsm_len - sizeof(*wsm)); + goto err; + } else if (!rx_resync) { + if (WARN_ON(wsm_seq != priv->wsm_rx_seq)) + goto err; + } + priv->wsm_rx_seq = (wsm_seq + 1) & 7; + rx_resync = 0; + + if (wsm_id & 0x0400) { + int rc = wsm_release_tx_buffer(priv, 1); + if (WARN_ON(rc < 0)) + return rc; + else if (rc > 0) + *tx = 1; + } + + /* cw1200_wsm_rx takes care of the SKB lifetime */ + if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx))) + goto err; + + if (skb_rx) { + dev_kfree_skb(skb_rx); + skb_rx = NULL; + } + + return 0; + +err: + if (skb_rx) { + dev_kfree_skb(skb_rx); + skb_rx = NULL; + } + return -1; +} + +static int cw1200_bh_tx_helper(struct cw1200_common *priv, + int *pending_tx, + int *tx_burst) +{ + size_t tx_len; + u8 *data; + int ret; + struct wsm_hdr *wsm; + + if (priv->device_can_sleep) { + ret = cw1200_device_wakeup(priv); + if (WARN_ON(ret < 0)) { /* Error in wakeup */ + *pending_tx = 1; + return 0; + } else if (ret) { /* Woke up */ + priv->device_can_sleep = false; + } else { /* Did not awake */ + *pending_tx = 1; + return 0; + } + } + + wsm_alloc_tx_buffer(priv); + ret = wsm_get_tx(priv, &data, &tx_len, tx_burst); + if (ret <= 0) { + wsm_release_tx_buffer(priv, 1); + if (WARN_ON(ret < 0)) + return ret; /* Error */ + return 0; /* No work */ + } + + wsm = (struct wsm_hdr *)data; + BUG_ON(tx_len < sizeof(*wsm)); + BUG_ON(__le16_to_cpu(wsm->len) != tx_len); + + atomic_add(1, &priv->bh_tx); + + tx_len = priv->hwbus_ops->align_size( + priv->hwbus_priv, tx_len); + + /* Check if not exceeding CW1200 capabilities */ + if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE)) + pr_debug("Write aligned len: %zu\n", tx_len); + + wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX)); + wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq)); + + if (WARN_ON(cw1200_data_write(priv, data, tx_len))) { + pr_err("tx blew up, len %zu\n", tx_len); + wsm_release_tx_buffer(priv, 1); + return -1; /* Error */ + } + + if (priv->wsm_enable_wsm_dumps) + print_hex_dump_bytes("--> ", + DUMP_PREFIX_NONE, + data, + __le16_to_cpu(wsm->len)); + + wsm_txed(priv, data); + priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX; + + if (*tx_burst > 1) { + cw1200_debug_tx_burst(priv); + return 1; /* Work remains */ + } + + return 0; +} + +static int cw1200_bh(void *arg) +{ + struct cw1200_common *priv = arg; + int rx, tx, term, suspend; + u16 ctrl_reg = 0; + int tx_allowed; + int pending_tx = 0; + int tx_burst; + long status; + u32 dummy; + int ret; + + for (;;) { + if (!priv->hw_bufs_used && + priv->powersave_enabled && + !priv->device_can_sleep && + !atomic_read(&priv->recent_scan)) { + status = 1 * HZ; + pr_debug("[BH] Device wakedown. No data.\n"); + cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0); + priv->device_can_sleep = true; + } else if (priv->hw_bufs_used) { + /* Interrupt loss detection */ + status = 1 * HZ; + } else { + status = MAX_SCHEDULE_TIMEOUT; + } + + /* Dummy Read for SDIO retry mechanism */ + if ((priv->hw_type != -1) && + (atomic_read(&priv->bh_rx) == 0) && + (atomic_read(&priv->bh_tx) == 0)) + cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID, + &dummy, sizeof(dummy)); + + pr_debug("[BH] waiting ...\n"); + status = wait_event_interruptible_timeout(priv->bh_wq, ({ + rx = atomic_xchg(&priv->bh_rx, 0); + tx = atomic_xchg(&priv->bh_tx, 0); + term = atomic_xchg(&priv->bh_term, 0); + suspend = pending_tx ?
+ 0 : atomic_read(&priv->bh_suspend); + (rx || tx || term || suspend || priv->bh_error); + }), status); + + pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n", + rx, tx, term, suspend, status); + + /* Did an error occur? */ + if ((status < 0 && status != -ERESTARTSYS) || + term || priv->bh_error) { + break; + } + if (!status) { /* wait_event timed out */ + unsigned long timestamp = jiffies; + long timeout; + int pending = 0; + int i; + + /* Check to see if we have any outstanding frames */ + if (priv->hw_bufs_used && (!rx || !tx)) { + wiphy_warn(priv->hw->wiphy, + "Missed interrupt? (%d frames outstanding)\n", + priv->hw_bufs_used); + rx = 1; + + /* Get a timestamp of "oldest" frame */ + for (i = 0; i < 4; ++i) + pending += cw1200_queue_get_xmit_timestamp( + &priv->tx_queue[i], + &timestamp, + priv->pending_frame_id); + + /* Check if frame transmission is timed out. + * Add an extra second with respect to possible + * interrupt loss. + */ + timeout = timestamp + + WSM_CMD_LAST_CHANCE_TIMEOUT + + 1 * HZ - + jiffies; + + /* And terminate BH thread if the frame is "stuck" */ + if (pending && timeout < 0) { + wiphy_warn(priv->hw->wiphy, + "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n", + priv->hw_bufs_used, pending, + timestamp, jiffies); + break; + } + } else if (!priv->device_can_sleep && + !atomic_read(&priv->recent_scan)) { + pr_debug("[BH] Device wakedown. Timeout.\n"); + cw1200_reg_write_16(priv, + ST90TDS_CONTROL_REG_ID, 0); + priv->device_can_sleep = true; + } + goto done; + } else if (suspend) { + pr_debug("[BH] Device suspend.\n"); + if (priv->powersave_enabled) { + pr_debug("[BH] Device wakedown. Suspend.\n"); + cw1200_reg_write_16(priv, + ST90TDS_CONTROL_REG_ID, 0); + priv->device_can_sleep = true; + } + + atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED); + wake_up(&priv->bh_evt_wq); + status = wait_event_interruptible(priv->bh_wq, + CW1200_BH_RESUME == atomic_read(&priv->bh_suspend)); + if (status < 0) { + wiphy_err(priv->hw->wiphy, + "Failed to wait for resume: %ld.\n", + status); + break; + } + pr_debug("[BH] Device resume.\n"); + atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); + wake_up(&priv->bh_evt_wq); + atomic_add(1, &priv->bh_rx); + goto done; + } + + rx: + tx += pending_tx; + pending_tx = 0; + + if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) + break; + + /* Don't bother trying to rx unless we have data to read */ + if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { + ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); + if (ret < 0) + break; + /* Double up here if there's more data.. */ + if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { + ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); + if (ret < 0) + break; + } + } + + tx: + if (tx) { + tx = 0; + + BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers); + tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used; + tx_allowed = tx_burst > 0; + + if (!tx_allowed) { + /* Buffers full. Ensure we process tx + * after we handle rx..
+ */ + pending_tx = tx; + goto done_rx; + } + ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst); + if (ret < 0) + break; + if (ret > 0) /* More to transmit */ + tx = ret; + + /* Re-read ctrl reg */ + if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) + break; + } + + done_rx: + if (priv->bh_error) + break; + if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) + goto rx; + if (tx) + goto tx; + + done: + /* Re-enable device interrupts */ + priv->hwbus_ops->lock(priv->hwbus_priv); + __cw1200_irq_enable(priv, 1); + priv->hwbus_ops->unlock(priv->hwbus_priv); + } + + /* Explicitly disable device interrupts */ + priv->hwbus_ops->lock(priv->hwbus_priv); + __cw1200_irq_enable(priv, 0); + priv->hwbus_ops->unlock(priv->hwbus_priv); + + if (!term) { + pr_err("[BH] Fatal error, exiting.\n"); + priv->bh_error = 1; + /* TODO: schedule_work(recovery) */ + } + return 0; +} diff --git a/drivers/net/wireless/cw1200/bh.h b/drivers/net/wireless/cw1200/bh.h new file mode 100644 index 000000000000..af6a4853728f --- /dev/null +++ b/drivers/net/wireless/cw1200/bh.h @@ -0,0 +1,28 @@ +/* + * Device handling thread interface for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_BH_H +#define CW1200_BH_H + +/* extern */ struct cw1200_common; + +int cw1200_register_bh(struct cw1200_common *priv); +void cw1200_unregister_bh(struct cw1200_common *priv); +void cw1200_irq_handler(struct cw1200_common *priv); +void cw1200_bh_wakeup(struct cw1200_common *priv); +int cw1200_bh_suspend(struct cw1200_common *priv); +int cw1200_bh_resume(struct cw1200_common *priv); +/* Must be called from BH thread. */ +void cw1200_enable_powersave(struct cw1200_common *priv, + bool enable); +int wsm_release_tx_buffer(struct cw1200_common *priv, int count); + +#endif /* CW1200_BH_H */ diff --git a/drivers/net/wireless/cw1200/cw1200.h b/drivers/net/wireless/cw1200/cw1200.h new file mode 100644 index 000000000000..243e96353d13 --- /dev/null +++ b/drivers/net/wireless/cw1200/cw1200.h @@ -0,0 +1,323 @@ +/* + * Common private data for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on the mac80211 Prism54 code, which is + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef CW1200_H +#define CW1200_H + +#include <linux/wait.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <net/mac80211.h> + +#include "queue.h" +#include "wsm.h" +#include "scan.h" +#include "txrx.h" +#include "pm.h" + +/* Forward declarations */ +struct hwbus_ops; +struct task_struct; +struct cw1200_debug_priv; +struct firmware; + +#define CW1200_MAX_CTRL_FRAME_LEN (0x1000) + +#define CW1200_MAX_STA_IN_AP_MODE (5) +#define CW1200_LINK_ID_AFTER_DTIM (CW1200_MAX_STA_IN_AP_MODE + 1) +#define CW1200_LINK_ID_UAPSD (CW1200_MAX_STA_IN_AP_MODE + 2) +#define CW1200_LINK_ID_MAX (CW1200_MAX_STA_IN_AP_MODE + 3) +#define CW1200_MAX_REQUEUE_ATTEMPTS (5) + +#define CW1200_MAX_TID (8) + +#define CW1200_BLOCK_ACK_CNT (30) +#define CW1200_BLOCK_ACK_THLD (800) +#define CW1200_BLOCK_ACK_HIST (3) +#define CW1200_BLOCK_ACK_INTERVAL (1 * HZ / CW1200_BLOCK_ACK_HIST) + +#define CW1200_JOIN_TIMEOUT (1 * HZ) +#define CW1200_AUTH_TIMEOUT (5 * HZ) + +struct cw1200_ht_info { + struct ieee80211_sta_ht_cap ht_cap; + enum nl80211_channel_type channel_type; + u16 operation_mode; +}; + +/* Please keep order */ +enum cw1200_join_status { + CW1200_JOIN_STATUS_PASSIVE = 0, + CW1200_JOIN_STATUS_MONITOR, + CW1200_JOIN_STATUS_JOINING, + CW1200_JOIN_STATUS_PRE_STA, + CW1200_JOIN_STATUS_STA, + CW1200_JOIN_STATUS_IBSS, + CW1200_JOIN_STATUS_AP, +}; + +enum cw1200_link_status { + CW1200_LINK_OFF, + CW1200_LINK_RESERVE, + CW1200_LINK_SOFT, + CW1200_LINK_HARD, + CW1200_LINK_RESET, + CW1200_LINK_RESET_REMAP, +}; + +extern int cw1200_power_mode; +extern const char * const cw1200_fw_types[]; + +struct cw1200_link_entry { + unsigned long timestamp; + enum cw1200_link_status status; + enum cw1200_link_status prev_status; + u8 mac[ETH_ALEN]; + u8 buffered[CW1200_MAX_TID]; + struct sk_buff_head rx_queue; +}; + +struct cw1200_common { + /* interfaces to the rest of the stack */ + struct ieee80211_hw *hw; + struct ieee80211_vif *vif; + struct device *pdev; + + /* Statistics */ + struct ieee80211_low_level_stats stats; + + /* Our macaddr */ + u8 mac_addr[ETH_ALEN]; + + /* Hardware interface */ + const struct hwbus_ops *hwbus_ops; + struct hwbus_priv *hwbus_priv; + + /* Hardware information */ + enum { + HIF_9000_SILICON_VERSATILE = 0, + HIF_8601_VERSATILE, + HIF_8601_SILICON, + } hw_type; + enum { + CW1200_HW_REV_CUT10 = 10, + CW1200_HW_REV_CUT11 = 11, + CW1200_HW_REV_CUT20 = 20, + CW1200_HW_REV_CUT22 = 22, + CW1X60_HW_REV = 40, + } hw_revision; + int hw_refclk; + bool hw_have_5ghz; + const struct firmware *sdd; + char *sdd_path; + + struct cw1200_debug_priv *debug; + + struct workqueue_struct *workqueue; + struct mutex conf_mutex; + + struct cw1200_queue tx_queue[4]; + struct cw1200_queue_stats tx_queue_stats; + int tx_burst_idx; + + /* firmware/hardware info */ + unsigned int tx_hdr_len; + + /* Radio data */ + int output_power; + + /* BBP/MAC state */ + struct ieee80211_rate *rates; + struct ieee80211_rate *mcs_rates; + struct ieee80211_channel *channel; + struct wsm_edca_params edca; + struct wsm_tx_queue_params tx_queue_params; + struct wsm_mib_association_mode association_mode; + struct wsm_set_bss_params bss_params; + struct cw1200_ht_info ht_info; + struct wsm_set_pm powersave_mode; + struct wsm_set_pm firmware_ps_mode; + int cqm_rssi_thold; + unsigned cqm_rssi_hyst; + bool cqm_use_rssi; + int cqm_beacon_loss_count; + int channel_switch_in_progress; + wait_queue_head_t channel_switch_done; + u8 long_frame_max_tx_count; + u8 short_frame_max_tx_count; + int mode; + bool enable_beacon; + int beacon_int; + bool 
listening; + struct wsm_rx_filter rx_filter; + struct wsm_mib_multicast_filter multicast_filter; + bool has_multicast_subscription; + bool disable_beacon_filter; + struct work_struct update_filtering_work; + struct work_struct set_beacon_wakeup_period_work; + + u8 ba_rx_tid_mask; + u8 ba_tx_tid_mask; + + struct cw1200_pm_state pm_state; + + struct wsm_p2p_ps_modeinfo p2p_ps_modeinfo; + struct wsm_uapsd_info uapsd_info; + bool setbssparams_done; + bool bt_present; + u8 conf_listen_interval; + u32 listen_interval; + u32 erp_info; + u32 rts_threshold; + + /* BH */ + atomic_t bh_rx; + atomic_t bh_tx; + atomic_t bh_term; + atomic_t bh_suspend; + + struct workqueue_struct *bh_workqueue; + struct work_struct bh_work; + + int bh_error; + wait_queue_head_t bh_wq; + wait_queue_head_t bh_evt_wq; + u8 buf_id_tx; + u8 buf_id_rx; + u8 wsm_rx_seq; + u8 wsm_tx_seq; + int hw_bufs_used; + bool powersave_enabled; + bool device_can_sleep; + + /* Scan status */ + struct cw1200_scan scan; + /* Keep cw1200 awake (WUP = 1) 1 second after each scan to avoid + * FW issue with sleeping/waking up. + */ + atomic_t recent_scan; + struct delayed_work clear_recent_scan_work; + + /* WSM */ + struct wsm_startup_ind wsm_caps; + struct mutex wsm_cmd_mux; + struct wsm_buf wsm_cmd_buf; + struct wsm_cmd wsm_cmd; + wait_queue_head_t wsm_cmd_wq; + wait_queue_head_t wsm_startup_done; + int firmware_ready; + atomic_t tx_lock; + + /* WSM debug */ + int wsm_enable_wsm_dumps; + + /* WSM Join */ + enum cw1200_join_status join_status; + u32 pending_frame_id; + bool join_pending; + struct delayed_work join_timeout; + struct work_struct unjoin_work; + struct work_struct join_complete_work; + int join_complete_status; + int join_dtim_period; + bool delayed_unjoin; + + /* TX/RX and security */ + s8 wep_default_key_id; + struct work_struct wep_key_work; + u32 key_map; + struct wsm_add_key keys[WSM_KEY_MAX_INDEX + 1]; + + /* AP powersave */ + u32 link_id_map; + struct cw1200_link_entry link_id_db[CW1200_MAX_STA_IN_AP_MODE]; + struct work_struct link_id_work; + struct delayed_work link_id_gc_work; + u32 sta_asleep_mask; + u32 pspoll_mask; + bool aid0_bit_set; + spinlock_t ps_state_lock; /* Protect power save state */ + bool buffered_multicasts; + bool tx_multicast; + struct work_struct set_tim_work; + struct work_struct set_cts_work; + struct work_struct multicast_start_work; + struct work_struct multicast_stop_work; + struct timer_list mcast_timeout; + + /* WSM events and CQM implementation */ + spinlock_t event_queue_lock; /* Protect event queue */ + struct list_head event_queue; + struct work_struct event_handler; + + struct delayed_work bss_loss_work; + spinlock_t bss_loss_lock; /* Protect BSS loss state */ + int bss_loss_state; + int bss_loss_confirm_id; + int delayed_link_loss; + struct work_struct bss_params_work; + + /* TX rate policy cache */ + struct tx_policy_cache tx_policy_cache; + struct work_struct tx_policy_upload_work; + + /* legacy PS mode switch in suspend */ + int ps_mode_switch_in_progress; + wait_queue_head_t ps_mode_switch_done; + + /* Workaround for WFD testcase 6.1.10*/ + struct work_struct linkid_reset_work; + u8 action_frame_sa[ETH_ALEN]; + u8 action_linkid; +}; + +struct cw1200_sta_priv { + int link_id; +}; + +/* interfaces for the drivers */ +int cw1200_core_probe(const struct hwbus_ops *hwbus_ops, + struct hwbus_priv *hwbus, + struct device *pdev, + struct cw1200_common **pself, + int ref_clk, const u8 *macaddr, + const char *sdd_path, bool have_5ghz); +void cw1200_core_release(struct cw1200_common *self); + 
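A usage note, tying back to the CW1200_WLAN_SDIO Kconfig help above: boards other than the Sagrad SG901-1091/1098 EVK are expected to install their own platform data from the board setup file before this driver probes. A minimal sketch of that glue follows; the struct field names and cw1200_sdio_set_platform_data() come from this patch, while the GPIO number, SDD filename, and init-function name are illustrative placeholders.

	#include <linux/platform_data/net-cw1200.h>

	/* Hypothetical board glue; all values below are placeholders */
	static struct cw1200_platform_data_sdio myboard_cw1200_pdata = {
		.ref_clk	= 38400,		/* same rate as the Sagrad default */
		.have_5ghz	= false,
		.sdd_file	= "sdd_myboard.bin",	/* placeholder SDD filename */
		.reset		= 215,			/* placeholder reset GPIO */
		.irq		= 0,			/* 0 = use the in-band SDIO interrupt */
	};

	static void __init myboard_init_wlan(void)
	{
		cw1200_sdio_set_platform_data(&myboard_cw1200_pdata);
	}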
+#define FWLOAD_BLOCK_SIZE (1024) + +static inline int cw1200_is_ht(const struct cw1200_ht_info *ht_info) +{ + return ht_info->channel_type != NL80211_CHAN_NO_HT; +} + +static inline int cw1200_ht_greenfield(const struct cw1200_ht_info *ht_info) +{ + return cw1200_is_ht(ht_info) && + (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && + !(ht_info->operation_mode & + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); +} + +static inline int cw1200_ht_ampdu_density(const struct cw1200_ht_info *ht_info) +{ + if (!cw1200_is_ht(ht_info)) + return 0; + return ht_info->ht_cap.ampdu_density; +} + +#endif /* CW1200_H */ diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c new file mode 100644 index 000000000000..ebdcdf44f155 --- /dev/null +++ b/drivers/net/wireless/cw1200/cw1200_sdio.c @@ -0,0 +1,425 @@ +/* + * Mac80211 SDIO driver for ST-Ericsson CW1200 device + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/mmc/host.h> +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/card.h> +#include <linux/mmc/sdio.h> +#include <net/mac80211.h> + +#include "cw1200.h" +#include "hwbus.h" +#include <linux/platform_data/net-cw1200.h> +#include "hwio.h" + +MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>"); +MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SDIO driver"); +MODULE_LICENSE("GPL"); + +#define SDIO_BLOCK_SIZE (512) + +/* Default platform data for Sagrad modules */ +static struct cw1200_platform_data_sdio sagrad_109x_evk_platform_data = { + .ref_clk = 38400, + .have_5ghz = false, + .sdd_file = "sdd_sagrad_1091_1098.bin", +}; + +/* Allow platform data to be overridden */ +static struct cw1200_platform_data_sdio *global_plat_data = &sagrad_109x_evk_platform_data; + +void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata) +{ + global_plat_data = pdata; +} + +struct hwbus_priv { + struct sdio_func *func; + struct cw1200_common *core; + const struct cw1200_platform_data_sdio *pdata; +}; + +#ifndef SDIO_VENDOR_ID_STE +#define SDIO_VENDOR_ID_STE 0x0020 +#endif + +#ifndef SDIO_DEVICE_ID_STE_CW1200 +#define SDIO_DEVICE_ID_STE_CW1200 0x2280 +#endif + +static const struct sdio_device_id cw1200_sdio_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) }, + { /* end: all zeroes */ }, +}; + +/* hwbus_ops implementation */ + +static int cw1200_sdio_memcpy_fromio(struct hwbus_priv *self, + unsigned int addr, + void *dst, int count) +{ + return sdio_memcpy_fromio(self->func, dst, addr, count); +} + +static int cw1200_sdio_memcpy_toio(struct hwbus_priv *self, + unsigned int addr, + const void *src, int count) +{ + return sdio_memcpy_toio(self->func, addr, (void *)src, count); +} + +static void cw1200_sdio_lock(struct hwbus_priv *self) +{ + sdio_claim_host(self->func); +} + +static void cw1200_sdio_unlock(struct hwbus_priv *self) +{ + sdio_release_host(self->func); +} + +static void cw1200_sdio_irq_handler(struct sdio_func *func) +{ + struct hwbus_priv *self = sdio_get_drvdata(func); + + /* note: sdio_host already claimed here.
*/ + if (self->core) + cw1200_irq_handler(self->core); +} + +static irqreturn_t cw1200_gpio_hardirq(int irq, void *dev_id) +{ + return IRQ_WAKE_THREAD; +} + +static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id) +{ + struct hwbus_priv *self = dev_id; + + if (self->core) { + sdio_claim_host(self->func); + cw1200_irq_handler(self->core); + sdio_release_host(self->func); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } +} + +static int cw1200_request_irq(struct hwbus_priv *self) +{ + int ret; + u8 cccr; + + cccr = sdio_f0_readb(self->func, SDIO_CCCR_IENx, &ret); + if (WARN_ON(ret)) + goto err; + + /* Master interrupt enable ... */ + cccr |= BIT(0); + + /* ... for our function */ + cccr |= BIT(self->func->num); + + sdio_f0_writeb(self->func, cccr, SDIO_CCCR_IENx, &ret); + if (WARN_ON(ret)) + goto err; + + ret = enable_irq_wake(self->pdata->irq); + if (WARN_ON(ret)) + goto err; + + /* Request the IRQ */ + ret = request_threaded_irq(self->pdata->irq, cw1200_gpio_hardirq, + cw1200_gpio_irq, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "cw1200_wlan_irq", self); + if (WARN_ON(ret)) + goto err; + + return 0; + +err: + return ret; +} + +static int cw1200_sdio_irq_subscribe(struct hwbus_priv *self) +{ + int ret = 0; + + pr_debug("SW IRQ subscribe\n"); + sdio_claim_host(self->func); + if (self->pdata->irq) + ret = cw1200_request_irq(self); + else + ret = sdio_claim_irq(self->func, cw1200_sdio_irq_handler); + + sdio_release_host(self->func); + return ret; +} + +static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self) +{ + int ret = 0; + + pr_debug("SW IRQ unsubscribe\n"); + + if (self->pdata->irq) { + disable_irq_wake(self->pdata->irq); + free_irq(self->pdata->irq, self); + } else { + sdio_claim_host(self->func); + ret = sdio_release_irq(self->func); + sdio_release_host(self->func); + } + return ret; +} + +static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata) +{ + if (pdata->reset) { + gpio_set_value(pdata->reset, 0); + msleep(30); /* Min is 2 * CLK32K cycles */ + gpio_free(pdata->reset); + } + + if (pdata->power_ctrl) + pdata->power_ctrl(pdata, false); + if (pdata->clk_ctrl) + pdata->clk_ctrl(pdata, false); + + return 0; +} + +static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata) +{ + /* Ensure I/Os are pulled low */ + if (pdata->reset) { + gpio_request(pdata->reset, "cw1200_wlan_reset"); + gpio_direction_output(pdata->reset, 0); + } + if (pdata->powerup) { + gpio_request(pdata->powerup, "cw1200_wlan_powerup"); + gpio_direction_output(pdata->powerup, 0); + } + if (pdata->reset || pdata->powerup) + msleep(10); /* Settle time? */ + + /* Enable 3v3 and 1v8 to hardware */ + if (pdata->power_ctrl) { + if (pdata->power_ctrl(pdata, true)) { + pr_err("power_ctrl() failed!\n"); + return -1; + } + } + + /* Enable CLK32K */ + if (pdata->clk_ctrl) { + if (pdata->clk_ctrl(pdata, true)) { + pr_err("clk_ctrl() failed!\n"); + return -1; + } + msleep(10); /* Delay until clock is stable for 2 cycles */ + } + + /* Enable POWERUP signal */ + if (pdata->powerup) { + gpio_set_value(pdata->powerup, 1); + msleep(250); /* or more..? */ + } + /* Enable RSTn signal */ + if (pdata->reset) { + gpio_set_value(pdata->reset, 1); + msleep(50); /* Or more..? 
*/ + } + return 0; +} + +static size_t cw1200_sdio_align_size(struct hwbus_priv *self, size_t size) +{ + if (self->pdata->no_nptb) + size = round_up(size, SDIO_BLOCK_SIZE); + else + size = sdio_align_size(self->func, size); + + return size; +} + +static int cw1200_sdio_pm(struct hwbus_priv *self, bool suspend) +{ + int ret = 0; + + if (self->pdata->irq) + ret = irq_set_irq_wake(self->pdata->irq, suspend); + return ret; +} + +static struct hwbus_ops cw1200_sdio_hwbus_ops = { + .hwbus_memcpy_fromio = cw1200_sdio_memcpy_fromio, + .hwbus_memcpy_toio = cw1200_sdio_memcpy_toio, + .lock = cw1200_sdio_lock, + .unlock = cw1200_sdio_unlock, + .align_size = cw1200_sdio_align_size, + .power_mgmt = cw1200_sdio_pm, +}; + +/* Probe Function to be called by SDIO stack when device is discovered */ +static int cw1200_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct hwbus_priv *self; + int status; + + pr_info("cw1200_wlan_sdio: Probe called\n"); + + /* We are only able to handle the wlan function */ + if (func->num != 0x01) + return -ENODEV; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + pr_err("Can't allocate SDIO hwbus_priv.\n"); + return -ENOMEM; + } + + func->card->quirks |= MMC_QUIRK_LENIENT_FN0; + + self->pdata = global_plat_data; /* FIXME */ + self->func = func; + sdio_set_drvdata(func, self); + sdio_claim_host(func); + sdio_enable_func(func); + sdio_release_host(func); + + status = cw1200_sdio_irq_subscribe(self); + + status = cw1200_core_probe(&cw1200_sdio_hwbus_ops, + self, &func->dev, &self->core, + self->pdata->ref_clk, + self->pdata->macaddr, + self->pdata->sdd_file, + self->pdata->have_5ghz); + if (status) { + cw1200_sdio_irq_unsubscribe(self); + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); + sdio_set_drvdata(func, NULL); + kfree(self); + } + + return status; +} + +/* Disconnect Function to be called by SDIO stack when + * device is disconnected + */ +static void cw1200_sdio_disconnect(struct sdio_func *func) +{ + struct hwbus_priv *self = sdio_get_drvdata(func); + + if (self) { + cw1200_sdio_irq_unsubscribe(self); + if (self->core) { + cw1200_core_release(self->core); + self->core = NULL; + } + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); + sdio_set_drvdata(func, NULL); + kfree(self); + } +} + +#ifdef CONFIG_PM +static int cw1200_sdio_suspend(struct device *dev) +{ + int ret; + struct sdio_func *func = dev_to_sdio_func(dev); + struct hwbus_priv *self = sdio_get_drvdata(func); + + if (!cw1200_can_suspend(self->core)) + return -EAGAIN; + + /* Notify SDIO that CW1200 will remain powered during suspend */ + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (ret) + pr_err("Error setting SDIO pm flags: %i\n", ret); + + return ret; +} + +static int cw1200_sdio_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops cw1200_pm_ops = { + .suspend = cw1200_sdio_suspend, + .resume = cw1200_sdio_resume, +}; +#endif + +static struct sdio_driver sdio_driver = { + .name = "cw1200_wlan_sdio", + .id_table = cw1200_sdio_ids, + .probe = cw1200_sdio_probe, + .remove = cw1200_sdio_disconnect, +#ifdef CONFIG_PM + .drv = { + .pm = &cw1200_pm_ops, + } +#endif +}; + +/* Init Module function -> Called by insmod */ +static int __init cw1200_sdio_init(void) +{ + const struct cw1200_platform_data_sdio *pdata; + int ret; + + /* FIXME -- this won't support multiple devices */ + pdata = global_plat_data; + + if (cw1200_sdio_on(pdata)) { + ret = -1; + goto err; + } + + ret = 
sdio_register_driver(&sdio_driver); + if (ret) + goto err; + + return 0; + +err: + cw1200_sdio_off(pdata); + return ret; +} + +/* Called at Driver Unloading */ +static void __exit cw1200_sdio_exit(void) +{ + const struct cw1200_platform_data_sdio *pdata; + + /* FIXME -- this won't support multiple devices */ + pdata = global_plat_data; + sdio_unregister_driver(&sdio_driver); + cw1200_sdio_off(pdata); +} + + +module_init(cw1200_sdio_init); +module_exit(cw1200_sdio_exit); diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c new file mode 100644 index 000000000000..953bd1904d3d --- /dev/null +++ b/drivers/net/wireless/cw1200/cw1200_spi.c @@ -0,0 +1,463 @@ +/* + * Mac80211 SPI driver for ST-Ericsson CW1200 device + * + * Copyright (c) 2011, Sagrad Inc. + * Author: Solomon Peachy <speachy@sagrad.com> + * + * Based on cw1200_sdio.c + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <net/mac80211.h> + +#include <linux/spi/spi.h> +#include <linux/device.h> + +#include "cw1200.h" +#include "hwbus.h" +#include <linux/platform_data/net-cw1200.h> +#include "hwio.h" + +MODULE_AUTHOR("Solomon Peachy <speachy@sagrad.com>"); +MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SPI driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:cw1200_wlan_spi"); + +/* #define SPI_DEBUG */ + +struct hwbus_priv { + struct spi_device *func; + struct cw1200_common *core; + const struct cw1200_platform_data_spi *pdata; + spinlock_t lock; /* Serialize all bus operations */ + int claimed; +}; + +#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) +#define SET_WRITE 0x7FFF /* usage: and operation */ +#define SET_READ 0x8000 /* usage: or operation */ + +/* Notes on byte ordering: + LE: B0 B1 B2 B3 + BE: B3 B2 B1 B0 + + Hardware expects 32-bit data to be written as 16-bit BE words: + + B1 B0 B3 B2 +*/ + +static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self, + unsigned int addr, + void *dst, int count) +{ + int ret, i; + uint16_t regaddr; + struct spi_message m; + + struct spi_transfer t_addr = { + .tx_buf = ®addr, + .len = sizeof(regaddr), + }; + struct spi_transfer t_msg = { + .rx_buf = dst, + .len = count, + }; + + regaddr = (SDIO_TO_SPI_ADDR(addr))<<12; + regaddr |= SET_READ; + regaddr |= (count>>1); + regaddr = cpu_to_le16(regaddr); + +#ifdef SPI_DEBUG + pr_info("READ : %04d from 0x%02x (%04x)\n", count, addr, + le16_to_cpu(regaddr)); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + regaddr = swab16(regaddr); + + spi_message_init(&m); + spi_message_add_tail(&t_addr, &m); + spi_message_add_tail(&t_msg, &m); + ret = spi_sync(self->func, &m); + +#ifdef SPI_DEBUG + pr_info("READ : "); + for (i = 0; i < t_addr.len; i++) + printk("%02x ", ((u8 *)t_addr.tx_buf)[i]); + printk(" : "); + for (i = 0; i < t_msg.len; i++) + printk("%02x ", ((u8 *)t_msg.rx_buf)[i]); + printk("\n"); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + { + uint16_t *buf = (uint16_t *)dst; + for (i = 0; i < ((count 
+ 1) >> 1); i++) + buf[i] = swab16(buf[i]); + } + + return ret; +} + +static int cw1200_spi_memcpy_toio(struct hwbus_priv *self, + unsigned int addr, + const void *src, int count) +{ + int rval, i; + uint16_t regaddr; + struct spi_transfer t_addr = { + .tx_buf = ®addr, + .len = sizeof(regaddr), + }; + struct spi_transfer t_msg = { + .tx_buf = src, + .len = count, + }; + struct spi_message m; + + regaddr = (SDIO_TO_SPI_ADDR(addr))<<12; + regaddr &= SET_WRITE; + regaddr |= (count>>1); + regaddr = cpu_to_le16(regaddr); + +#ifdef SPI_DEBUG + pr_info("WRITE: %04d to 0x%02x (%04x)\n", count, addr, + le16_to_cpu(regaddr)); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + { + uint16_t *buf = (uint16_t *)src; + regaddr = swab16(regaddr); + for (i = 0; i < ((count + 1) >> 1); i++) + buf[i] = swab16(buf[i]); + } + +#ifdef SPI_DEBUG + pr_info("WRITE: "); + for (i = 0; i < t_addr.len; i++) + printk("%02x ", ((u8 *)t_addr.tx_buf)[i]); + printk(" : "); + for (i = 0; i < t_msg.len; i++) + printk("%02x ", ((u8 *)t_msg.tx_buf)[i]); + printk("\n"); +#endif + + spi_message_init(&m); + spi_message_add_tail(&t_addr, &m); + spi_message_add_tail(&t_msg, &m); + rval = spi_sync(self->func, &m); + +#ifdef SPI_DEBUG + pr_info("WROTE: %d\n", m.actual_length); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + { + uint16_t *buf = (uint16_t *)src; + for (i = 0; i < ((count + 1) >> 1); i++) + buf[i] = swab16(buf[i]); + } + return rval; +} + +static void cw1200_spi_lock(struct hwbus_priv *self) +{ + unsigned long flags; + + might_sleep(); + + spin_lock_irqsave(&self->lock, flags); + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (!self->claimed) + break; + spin_unlock_irqrestore(&self->lock, flags); + schedule(); + spin_lock_irqsave(&self->lock, flags); + } + set_current_state(TASK_RUNNING); + self->claimed = 1; + spin_unlock_irqrestore(&self->lock, flags); + + return; +} + +static void cw1200_spi_unlock(struct hwbus_priv *self) +{ + unsigned long flags; + + spin_lock_irqsave(&self->lock, flags); + self->claimed = 0; + spin_unlock_irqrestore(&self->lock, flags); + return; +} + +static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id) +{ + struct hwbus_priv *self = dev_id; + + if (self->core) { + cw1200_irq_handler(self->core); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } +} + +static int cw1200_spi_irq_subscribe(struct hwbus_priv *self) +{ + int ret; + + pr_debug("SW IRQ subscribe\n"); + + ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler, + IRQF_TRIGGER_HIGH, + "cw1200_wlan_irq", self); + if (WARN_ON(ret < 0)) + goto exit; + + ret = enable_irq_wake(self->func->irq); + if (WARN_ON(ret)) + goto free_irq; + + return 0; + +free_irq: + free_irq(self->func->irq, self); +exit: + return ret; +} + +static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) +{ + int ret = 0; + + pr_debug("SW IRQ unsubscribe\n"); + disable_irq_wake(self->func->irq); + free_irq(self->func->irq, self); + + return ret; +} + +static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) +{ + if (pdata->reset) { + gpio_set_value(pdata->reset, 0); + msleep(30); /* Min is 2 * CLK32K cycles */ + gpio_free(pdata->reset); + } + + if (pdata->power_ctrl) + pdata->power_ctrl(pdata, false); + if (pdata->clk_ctrl) + pdata->clk_ctrl(pdata, false); + + return 0; +} + +static int 
cw1200_spi_on(const struct cw1200_platform_data_spi *pdata) +{ + /* Ensure I/Os are pulled low */ + if (pdata->reset) { + gpio_request(pdata->reset, "cw1200_wlan_reset"); + gpio_direction_output(pdata->reset, 0); + } + if (pdata->powerup) { + gpio_request(pdata->powerup, "cw1200_wlan_powerup"); + gpio_direction_output(pdata->powerup, 0); + } + if (pdata->reset || pdata->powerup) + msleep(10); /* Settle time? */ + + /* Enable 3v3 and 1v8 to hardware */ + if (pdata->power_ctrl) { + if (pdata->power_ctrl(pdata, true)) { + pr_err("power_ctrl() failed!\n"); + return -1; + } + } + + /* Enable CLK32K */ + if (pdata->clk_ctrl) { + if (pdata->clk_ctrl(pdata, true)) { + pr_err("clk_ctrl() failed!\n"); + return -1; + } + msleep(10); /* Delay until clock is stable for 2 cycles */ + } + + /* Enable POWERUP signal */ + if (pdata->powerup) { + gpio_set_value(pdata->powerup, 1); + msleep(250); /* or more..? */ + } + /* Enable RSTn signal */ + if (pdata->reset) { + gpio_set_value(pdata->reset, 1); + msleep(50); /* Or more..? */ + } + return 0; +} + +static size_t cw1200_spi_align_size(struct hwbus_priv *self, size_t size) +{ + return size & 1 ? size + 1 : size; +} + +static int cw1200_spi_pm(struct hwbus_priv *self, bool suspend) +{ + return irq_set_irq_wake(self->func->irq, suspend); +} + +static struct hwbus_ops cw1200_spi_hwbus_ops = { + .hwbus_memcpy_fromio = cw1200_spi_memcpy_fromio, + .hwbus_memcpy_toio = cw1200_spi_memcpy_toio, + .lock = cw1200_spi_lock, + .unlock = cw1200_spi_unlock, + .align_size = cw1200_spi_align_size, + .power_mgmt = cw1200_spi_pm, +}; + +/* Probe Function to be called by SPI stack when device is discovered */ +static int cw1200_spi_probe(struct spi_device *func) +{ + const struct cw1200_platform_data_spi *plat_data = + func->dev.platform_data; + struct hwbus_priv *self; + int status; + + /* Sanity check speed */ + if (func->max_speed_hz > 52000000) + func->max_speed_hz = 52000000; + if (func->max_speed_hz < 1000000) + func->max_speed_hz = 1000000; + + /* Fix up transfer size */ + if (plat_data->spi_bits_per_word) + func->bits_per_word = plat_data->spi_bits_per_word; + if (!func->bits_per_word) + func->bits_per_word = 16; + + /* And finally.. 
*/ + func->mode = SPI_MODE_0; + + pr_info("cw1200_wlan_spi: Probe called (CS %d M %d BPW %d CLK %d)\n", + func->chip_select, func->mode, func->bits_per_word, + func->max_speed_hz); + + if (cw1200_spi_on(plat_data)) { + pr_err("spi_on() failed!\n"); + return -1; + } + + if (spi_setup(func)) { + pr_err("spi_setup() failed!\n"); + return -1; + } + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + pr_err("Can't allocate SPI hwbus_priv."); + return -ENOMEM; + } + + self->pdata = plat_data; + self->func = func; + spin_lock_init(&self->lock); + + spi_set_drvdata(func, self); + + status = cw1200_spi_irq_subscribe(self); + + status = cw1200_core_probe(&cw1200_spi_hwbus_ops, + self, &func->dev, &self->core, + self->pdata->ref_clk, + self->pdata->macaddr, + self->pdata->sdd_file, + self->pdata->have_5ghz); + + if (status) { + cw1200_spi_irq_unsubscribe(self); + cw1200_spi_off(plat_data); + kfree(self); + } + + return status; +} + +/* Disconnect Function to be called by SPI stack when device is disconnected */ +static int cw1200_spi_disconnect(struct spi_device *func) +{ + struct hwbus_priv *self = spi_get_drvdata(func); + + if (self) { + cw1200_spi_irq_unsubscribe(self); + if (self->core) { + cw1200_core_release(self->core); + self->core = NULL; + } + kfree(self); + } + cw1200_spi_off(func->dev.platform_data); + + return 0; +} + +#ifdef CONFIG_PM +static int cw1200_spi_suspend(struct device *dev, pm_message_t state) +{ + struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev)); + + if (!cw1200_can_suspend(self->core)) + return -EAGAIN; + + /* XXX notify host that we have to keep CW1200 powered on? */ + return 0; +} + +static int cw1200_spi_resume(struct device *dev) +{ + return 0; +} +#endif + +static struct spi_driver spi_driver = { + .probe = cw1200_spi_probe, + .remove = cw1200_spi_disconnect, + .driver = { + .name = "cw1200_wlan_spi", + .bus = &spi_bus_type, + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .suspend = cw1200_spi_suspend, + .resume = cw1200_spi_resume, +#endif + }, +}; + +module_spi_driver(spi_driver); diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c new file mode 100644 index 000000000000..e323b4d54338 --- /dev/null +++ b/drivers/net/wireless/cw1200/debug.c @@ -0,0 +1,428 @@ +/* + * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers + * DebugFS code + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include "cw1200.h" +#include "debug.h" +#include "fwio.h" + +/* join_status */ +static const char * const cw1200_debug_join_status[] = { + "passive", + "monitor", + "station (joining)", + "station (not authenticated yet)", + "station", + "adhoc", + "access point", +}; + +/* WSM_JOIN_PREAMBLE_... 
*/ +static const char * const cw1200_debug_preamble[] = { + "long", + "short", + "long on 1 and 2 Mbps", +}; + + +static const char * const cw1200_debug_link_id[] = { + "OFF", + "REQ", + "SOFT", + "HARD", +}; + +static const char *cw1200_debug_mode(int mode) +{ + switch (mode) { + case NL80211_IFTYPE_UNSPECIFIED: + return "unspecified"; + case NL80211_IFTYPE_MONITOR: + return "monitor"; + case NL80211_IFTYPE_STATION: + return "station"; + case NL80211_IFTYPE_ADHOC: + return "adhoc"; + case NL80211_IFTYPE_MESH_POINT: + return "mesh point"; + case NL80211_IFTYPE_AP: + return "access point"; + case NL80211_IFTYPE_P2P_CLIENT: + return "p2p client"; + case NL80211_IFTYPE_P2P_GO: + return "p2p go"; + default: + return "unsupported"; + } +} + +static void cw1200_queue_status_show(struct seq_file *seq, + struct cw1200_queue *q) +{ + int i; + seq_printf(seq, "Queue %d:\n", q->queue_id); + seq_printf(seq, " capacity: %zu\n", q->capacity); + seq_printf(seq, " queued: %zu\n", q->num_queued); + seq_printf(seq, " pending: %zu\n", q->num_pending); + seq_printf(seq, " sent: %zu\n", q->num_sent); + seq_printf(seq, " locked: %s\n", q->tx_locked_cnt ? "yes" : "no"); + seq_printf(seq, " overfull: %s\n", q->overfull ? "yes" : "no"); + seq_puts(seq, " link map: 0-> "); + for (i = 0; i < q->stats->map_capacity; ++i) + seq_printf(seq, "%.2d ", q->link_map_cache[i]); + seq_printf(seq, "<-%zu\n", q->stats->map_capacity); +} + +static void cw1200_debug_print_map(struct seq_file *seq, + struct cw1200_common *priv, + const char *label, + u32 map) +{ + int i; + seq_printf(seq, "%s0-> ", label); + for (i = 0; i < priv->tx_queue_stats.map_capacity; ++i) + seq_printf(seq, "%s ", (map & BIT(i)) ? "**" : ".."); + seq_printf(seq, "<-%zu\n", priv->tx_queue_stats.map_capacity - 1); +} + +static int cw1200_status_show(struct seq_file *seq, void *v) +{ + int i; + struct list_head *item; + struct cw1200_common *priv = seq->private; + struct cw1200_debug_priv *d = priv->debug; + + seq_puts(seq, "CW1200 Wireless LAN driver status\n"); + seq_printf(seq, "Hardware: %d.%d\n", + priv->wsm_caps.hw_id, + priv->wsm_caps.hw_subid); + seq_printf(seq, "Firmware: %s %d.%d\n", + cw1200_fw_types[priv->wsm_caps.fw_type], + priv->wsm_caps.fw_ver, + priv->wsm_caps.fw_build); + seq_printf(seq, "FW API: %d\n", + priv->wsm_caps.fw_api); + seq_printf(seq, "FW caps: 0x%.4X\n", + priv->wsm_caps.fw_cap); + seq_printf(seq, "FW label: '%s'\n", + priv->wsm_caps.fw_label); + seq_printf(seq, "Mode: %s%s\n", + cw1200_debug_mode(priv->mode), + priv->listening ? " (listening)" : ""); + seq_printf(seq, "Join state: %s\n", + cw1200_debug_join_status[priv->join_status]); + if (priv->channel) + seq_printf(seq, "Channel: %d%s\n", + priv->channel->hw_value, + priv->channel_switch_in_progress ? + " (switching)" : ""); + if (priv->rx_filter.promiscuous) + seq_puts(seq, "Filter: promisc\n"); + else if (priv->rx_filter.fcs) + seq_puts(seq, "Filter: fcs\n"); + if (priv->rx_filter.bssid) + seq_puts(seq, "Filter: bssid\n"); + if (!priv->disable_beacon_filter) + seq_puts(seq, "Filter: beacons\n"); + + if (priv->enable_beacon || + priv->mode == NL80211_IFTYPE_AP || + priv->mode == NL80211_IFTYPE_ADHOC || + priv->mode == NL80211_IFTYPE_MESH_POINT || + priv->mode == NL80211_IFTYPE_P2P_GO) + seq_printf(seq, "Beaconing: %s\n", + priv->enable_beacon ? 
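+			   /* only meaningful for the AP-like modes
+			    * checked just above */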
+ "enabled" : "disabled"); + + for (i = 0; i < 4; ++i) + seq_printf(seq, "EDCA(%d): %d, %d, %d, %d, %d\n", i, + priv->edca.params[i].cwmin, + priv->edca.params[i].cwmax, + priv->edca.params[i].aifns, + priv->edca.params[i].txop_limit, + priv->edca.params[i].max_rx_lifetime); + + if (priv->join_status == CW1200_JOIN_STATUS_STA) { + static const char *pm_mode = "unknown"; + switch (priv->powersave_mode.mode) { + case WSM_PSM_ACTIVE: + pm_mode = "off"; + break; + case WSM_PSM_PS: + pm_mode = "on"; + break; + case WSM_PSM_FAST_PS: + pm_mode = "dynamic"; + break; + } + seq_printf(seq, "Preamble: %s\n", + cw1200_debug_preamble[priv->association_mode.preamble]); + seq_printf(seq, "AMPDU spcn: %d\n", + priv->association_mode.mpdu_start_spacing); + seq_printf(seq, "Basic rate: 0x%.8X\n", + le32_to_cpu(priv->association_mode.basic_rate_set)); + seq_printf(seq, "Bss lost: %d beacons\n", + priv->bss_params.beacon_lost_count); + seq_printf(seq, "AID: %d\n", + priv->bss_params.aid); + seq_printf(seq, "Rates: 0x%.8X\n", + priv->bss_params.operational_rate_set); + seq_printf(seq, "Powersave: %s\n", pm_mode); + } + seq_printf(seq, "HT: %s\n", + cw1200_is_ht(&priv->ht_info) ? "on" : "off"); + if (cw1200_is_ht(&priv->ht_info)) { + seq_printf(seq, "Greenfield: %s\n", + cw1200_ht_greenfield(&priv->ht_info) ? "yes" : "no"); + seq_printf(seq, "AMPDU dens: %d\n", + cw1200_ht_ampdu_density(&priv->ht_info)); + } + seq_printf(seq, "RSSI thold: %d\n", + priv->cqm_rssi_thold); + seq_printf(seq, "RSSI hyst: %d\n", + priv->cqm_rssi_hyst); + seq_printf(seq, "Long retr: %d\n", + priv->long_frame_max_tx_count); + seq_printf(seq, "Short retr: %d\n", + priv->short_frame_max_tx_count); + spin_lock_bh(&priv->tx_policy_cache.lock); + i = 0; + list_for_each(item, &priv->tx_policy_cache.used) + ++i; + spin_unlock_bh(&priv->tx_policy_cache.lock); + seq_printf(seq, "RC in use: %d\n", i); + + seq_puts(seq, "\n"); + for (i = 0; i < 4; ++i) { + cw1200_queue_status_show(seq, &priv->tx_queue[i]); + seq_puts(seq, "\n"); + } + + cw1200_debug_print_map(seq, priv, "Link map: ", + priv->link_id_map); + cw1200_debug_print_map(seq, priv, "Asleep map: ", + priv->sta_asleep_mask); + cw1200_debug_print_map(seq, priv, "PSPOLL map: ", + priv->pspoll_mask); + + seq_puts(seq, "\n"); + + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + if (priv->link_id_db[i].status) { + seq_printf(seq, "Link %d: %s, %pM\n", + i + 1, + cw1200_debug_link_id[priv->link_id_db[i].status], + priv->link_id_db[i].mac); + } + } + + seq_puts(seq, "\n"); + + seq_printf(seq, "BH status: %s\n", + atomic_read(&priv->bh_term) ? "terminated" : "alive"); + seq_printf(seq, "Pending RX: %d\n", + atomic_read(&priv->bh_rx)); + seq_printf(seq, "Pending TX: %d\n", + atomic_read(&priv->bh_tx)); + if (priv->bh_error) + seq_printf(seq, "BH errcode: %d\n", + priv->bh_error); + seq_printf(seq, "TX bufs: %d x %d bytes\n", + priv->wsm_caps.input_buffers, + priv->wsm_caps.input_buffer_size); + seq_printf(seq, "Used bufs: %d\n", + priv->hw_bufs_used); + seq_printf(seq, "Powermgmt: %s\n", + priv->powersave_enabled ? "on" : "off"); + seq_printf(seq, "Device: %s\n", + priv->device_can_sleep ? "asleep" : "awake"); + + spin_lock(&priv->wsm_cmd.lock); + seq_printf(seq, "WSM status: %s\n", + priv->wsm_cmd.done ? "idle" : "active"); + seq_printf(seq, "WSM cmd: 0x%.4X (%td bytes)\n", + priv->wsm_cmd.cmd, priv->wsm_cmd.len); + seq_printf(seq, "WSM retval: %d\n", + priv->wsm_cmd.ret); + spin_unlock(&priv->wsm_cmd.lock); + + seq_printf(seq, "Datapath: %s\n", + atomic_read(&priv->tx_lock) ? 
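/* tx_lock is a counter; any non-zero value freezes the TX datapath */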
"locked" : "unlocked"); + if (atomic_read(&priv->tx_lock)) + seq_printf(seq, "TXlock cnt: %d\n", + atomic_read(&priv->tx_lock)); + + seq_printf(seq, "TXed: %d\n", + d->tx); + seq_printf(seq, "AGG TXed: %d\n", + d->tx_agg); + seq_printf(seq, "MULTI TXed: %d (%d)\n", + d->tx_multi, d->tx_multi_frames); + seq_printf(seq, "RXed: %d\n", + d->rx); + seq_printf(seq, "AGG RXed: %d\n", + d->rx_agg); + seq_printf(seq, "TX miss: %d\n", + d->tx_cache_miss); + seq_printf(seq, "TX align: %d\n", + d->tx_align); + seq_printf(seq, "TX burst: %d\n", + d->tx_burst); + seq_printf(seq, "TX TTL: %d\n", + d->tx_ttl); + seq_printf(seq, "Scan: %s\n", + atomic_read(&priv->scan.in_progress) ? "active" : "idle"); + + return 0; +} + +static int cw1200_status_open(struct inode *inode, struct file *file) +{ + return single_open(file, &cw1200_status_show, + inode->i_private); +} + +static const struct file_operations fops_status = { + .open = cw1200_status_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int cw1200_counters_show(struct seq_file *seq, void *v) +{ + int ret; + struct cw1200_common *priv = seq->private; + struct wsm_mib_counters_table counters; + + ret = wsm_get_counters_table(priv, &counters); + if (ret) + return ret; + +#define PUT_COUNTER(tab, name) \ + seq_printf(seq, "%s:" tab "%d\n", #name, \ + __le32_to_cpu(counters.name)) + + PUT_COUNTER("\t\t", plcp_errors); + PUT_COUNTER("\t\t", fcs_errors); + PUT_COUNTER("\t\t", tx_packets); + PUT_COUNTER("\t\t", rx_packets); + PUT_COUNTER("\t\t", rx_packet_errors); + PUT_COUNTER("\t", rx_decryption_failures); + PUT_COUNTER("\t\t", rx_mic_failures); + PUT_COUNTER("\t", rx_no_key_failures); + PUT_COUNTER("\t", tx_multicast_frames); + PUT_COUNTER("\t", tx_frames_success); + PUT_COUNTER("\t", tx_frame_failures); + PUT_COUNTER("\t", tx_frames_retried); + PUT_COUNTER("\t", tx_frames_multi_retried); + PUT_COUNTER("\t", rx_frame_duplicates); + PUT_COUNTER("\t\t", rts_success); + PUT_COUNTER("\t\t", rts_failures); + PUT_COUNTER("\t\t", ack_failures); + PUT_COUNTER("\t", rx_multicast_frames); + PUT_COUNTER("\t", rx_frames_success); + PUT_COUNTER("\t", rx_cmac_icv_errors); + PUT_COUNTER("\t\t", rx_cmac_replays); + PUT_COUNTER("\t", rx_mgmt_ccmp_replays); + +#undef PUT_COUNTER + + return 0; +} + +static int cw1200_counters_open(struct inode *inode, struct file *file) +{ + return single_open(file, &cw1200_counters_show, + inode->i_private); +} + +static const struct file_operations fops_counters = { + .open = cw1200_counters_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static ssize_t cw1200_wsm_dumps(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct cw1200_common *priv = file->private_data; + char buf[1]; + + if (!count) + return -EINVAL; + if (copy_from_user(buf, user_buf, 1)) + return -EFAULT; + + if (buf[0] == '1') + priv->wsm_enable_wsm_dumps = 1; + else + priv->wsm_enable_wsm_dumps = 0; + + return count; +} + +static const struct file_operations fops_wsm_dumps = { + .open = simple_open, + .write = cw1200_wsm_dumps, + .llseek = default_llseek, +}; + +int cw1200_debug_init(struct cw1200_common *priv) +{ + int ret = -ENOMEM; + struct cw1200_debug_priv *d = kzalloc(sizeof(struct cw1200_debug_priv), + GFP_KERNEL); + priv->debug = d; + if (!d) + return ret; + + d->debugfs_phy = debugfs_create_dir("cw1200", + priv->hw->wiphy->debugfsdir); + if (!d->debugfs_phy) + goto err; + + if 
(!debugfs_create_file("status", S_IRUSR, d->debugfs_phy, + priv, &fops_status)) + goto err; + + if (!debugfs_create_file("counters", S_IRUSR, d->debugfs_phy, + priv, &fops_counters)) + goto err; + + if (!debugfs_create_file("wsm_dumps", S_IWUSR, d->debugfs_phy, + priv, &fops_wsm_dumps)) + goto err; + + return 0; + +err: + priv->debug = NULL; + debugfs_remove_recursive(d->debugfs_phy); + kfree(d); + return ret; +} + +void cw1200_debug_release(struct cw1200_common *priv) +{ + struct cw1200_debug_priv *d = priv->debug; + if (d) { + debugfs_remove_recursive(d->debugfs_phy); + priv->debug = NULL; + kfree(d); + } +} diff --git a/drivers/net/wireless/cw1200/debug.h b/drivers/net/wireless/cw1200/debug.h new file mode 100644 index 000000000000..b525aba53bfc --- /dev/null +++ b/drivers/net/wireless/cw1200/debug.h @@ -0,0 +1,93 @@ +/* + * DebugFS code for ST-Ericsson CW1200 mac80211 driver + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_DEBUG_H_INCLUDED +#define CW1200_DEBUG_H_INCLUDED + +struct cw1200_debug_priv { + struct dentry *debugfs_phy; + int tx; + int tx_agg; + int rx; + int rx_agg; + int tx_multi; + int tx_multi_frames; + int tx_cache_miss; + int tx_align; + int tx_ttl; + int tx_burst; + int ba_cnt; + int ba_acc; + int ba_cnt_rx; + int ba_acc_rx; +}; + +int cw1200_debug_init(struct cw1200_common *priv); +void cw1200_debug_release(struct cw1200_common *priv); + +static inline void cw1200_debug_txed(struct cw1200_common *priv) +{ + ++priv->debug->tx; +} + +static inline void cw1200_debug_txed_agg(struct cw1200_common *priv) +{ + ++priv->debug->tx_agg; +} + +static inline void cw1200_debug_txed_multi(struct cw1200_common *priv, + int count) +{ + ++priv->debug->tx_multi; + priv->debug->tx_multi_frames += count; +} + +static inline void cw1200_debug_rxed(struct cw1200_common *priv) +{ + ++priv->debug->rx; +} + +static inline void cw1200_debug_rxed_agg(struct cw1200_common *priv) +{ + ++priv->debug->rx_agg; +} + +static inline void cw1200_debug_tx_cache_miss(struct cw1200_common *priv) +{ + ++priv->debug->tx_cache_miss; +} + +static inline void cw1200_debug_tx_align(struct cw1200_common *priv) +{ + ++priv->debug->tx_align; +} + +static inline void cw1200_debug_tx_ttl(struct cw1200_common *priv) +{ + ++priv->debug->tx_ttl; +} + +static inline void cw1200_debug_tx_burst(struct cw1200_common *priv) +{ + ++priv->debug->tx_burst; +} + +static inline void cw1200_debug_ba(struct cw1200_common *priv, + int ba_cnt, int ba_acc, + int ba_cnt_rx, int ba_acc_rx) +{ + priv->debug->ba_cnt = ba_cnt; + priv->debug->ba_acc = ba_acc; + priv->debug->ba_cnt_rx = ba_cnt_rx; + priv->debug->ba_acc_rx = ba_acc_rx; +} + +#endif /* CW1200_DEBUG_H_INCLUDED */ diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c new file mode 100644 index 000000000000..acdff0f7f952 --- /dev/null +++ b/drivers/net/wireless/cw1200/fwio.c @@ -0,0 +1,520 @@ +/* + * Firmware I/O code for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on: + * ST-Ericsson UMAC CW1200 driver which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh <ajitpal.singh@stericsson.com> + * + * This program is free software; you can redistribute it and/or modify + 
* it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/init.h> +#include <linux/vmalloc.h> +#include <linux/sched.h> +#include <linux/firmware.h> + +#include "cw1200.h" +#include "fwio.h" +#include "hwio.h" +#include "hwbus.h" +#include "bh.h" + +static int cw1200_get_hw_type(u32 config_reg_val, int *major_revision) +{ + int hw_type = -1; + u32 silicon_type = (config_reg_val >> 24) & 0x7; + u32 silicon_vers = (config_reg_val >> 31) & 0x1; + + switch (silicon_type) { + case 0x00: + *major_revision = 1; + hw_type = HIF_9000_SILICON_VERSATILE; + break; + case 0x01: + case 0x02: /* CW1x00 */ + case 0x04: /* CW1x60 */ + *major_revision = silicon_type; + if (silicon_vers) + hw_type = HIF_8601_VERSATILE; + else + hw_type = HIF_8601_SILICON; + break; + default: + break; + } + + return hw_type; +} + +static int cw1200_load_firmware_cw1200(struct cw1200_common *priv) +{ + int ret, block, num_blocks; + unsigned i; + u32 val32; + u32 put = 0, get = 0; + u8 *buf = NULL; + const char *fw_path; + const struct firmware *firmware = NULL; + + /* Macroses are local. */ +#define APB_WRITE(reg, val) \ + do { \ + ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \ + if (ret < 0) \ + goto error; \ + } while (0) +#define APB_READ(reg, val) \ + do { \ + ret = cw1200_apb_read_32(priv, CW1200_APB(reg), &(val)); \ + if (ret < 0) \ + goto error; \ + } while (0) +#define REG_WRITE(reg, val) \ + do { \ + ret = cw1200_reg_write_32(priv, (reg), (val)); \ + if (ret < 0) \ + goto error; \ + } while (0) +#define REG_READ(reg, val) \ + do { \ + ret = cw1200_reg_read_32(priv, (reg), &(val)); \ + if (ret < 0) \ + goto error; \ + } while (0) + + switch (priv->hw_revision) { + case CW1200_HW_REV_CUT10: + fw_path = FIRMWARE_CUT10; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_10; + break; + case CW1200_HW_REV_CUT11: + fw_path = FIRMWARE_CUT11; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_11; + break; + case CW1200_HW_REV_CUT20: + fw_path = FIRMWARE_CUT20; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_20; + break; + case CW1200_HW_REV_CUT22: + fw_path = FIRMWARE_CUT22; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_22; + break; + case CW1X60_HW_REV: + fw_path = FIRMWARE_CW1X60; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_CW1X60; + break; + default: + pr_err("Invalid silicon revision %d.\n", priv->hw_revision); + return -EINVAL; + } + + /* Initialize common registers */ + APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, DOWNLOAD_ARE_YOU_HERE); + APB_WRITE(DOWNLOAD_PUT_REG, 0); + APB_WRITE(DOWNLOAD_GET_REG, 0); + APB_WRITE(DOWNLOAD_STATUS_REG, DOWNLOAD_PENDING); + APB_WRITE(DOWNLOAD_FLAGS_REG, 0); + + /* Write the NOP Instruction */ + REG_WRITE(ST90TDS_SRAM_BASE_ADDR_REG_ID, 0xFFF20000); + REG_WRITE(ST90TDS_AHB_DPORT_REG_ID, 0xEAFFFFFE); + + /* Release CPU from RESET */ + REG_READ(ST90TDS_CONFIG_REG_ID, val32); + val32 &= ~ST90TDS_CONFIG_CPU_RESET_BIT; + REG_WRITE(ST90TDS_CONFIG_REG_ID, val32); + + /* Enable Clock */ + val32 &= ~ST90TDS_CONFIG_CPU_CLK_DIS_BIT; + REG_WRITE(ST90TDS_CONFIG_REG_ID, val32); + + /* Load a firmware file */ + ret = request_firmware(&firmware, fw_path, priv->pdev); + if (ret) { + pr_err("Can't load firmware file %s.\n", fw_path); + goto error; + } + + buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); + if (!buf) { + pr_err("Can't allocate firmware load buffer.\n"); + ret = -ENOMEM; + goto error; + } + + /* Check if the bootloader is ready */ + for (i = 0; i < 100; i += 1 + i / 2) { + 
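/* Back-off poll: i steps 0, 1, 2, 4, 7, 11, ... so mdelay(i) below
+		 * waits progressively longer on each pass. */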
APB_READ(DOWNLOAD_IMAGE_SIZE_REG, val32);
+		if (val32 == DOWNLOAD_I_AM_HERE)
+			break;
+		mdelay(i);
+	} /* End of for loop */
+
+	if (val32 != DOWNLOAD_I_AM_HERE) {
+		pr_err("Bootloader is not ready.\n");
+		ret = -ETIMEDOUT;
+		goto error;
+	}
+
+	/* Calculate number of download blocks */
+	num_blocks = (firmware->size - 1) / DOWNLOAD_BLOCK_SIZE + 1;
+
+	/* Updating the length in Download Ctrl Area */
+	val32 = firmware->size; /* size_t is implicitly truncated to u32 */
+	APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, val32);
+
+	/* Firmware downloading loop */
+	for (block = 0; block < num_blocks; block++) {
+		size_t tx_size;
+		size_t block_size;
+
+		/* check the download status */
+		APB_READ(DOWNLOAD_STATUS_REG, val32);
+		if (val32 != DOWNLOAD_PENDING) {
+			pr_err("Bootloader reported error %d.\n", val32);
+			ret = -EIO;
+			goto error;
+		}
+
+		/* loop until put - get <= 24K */
+		for (i = 0; i < 100; i++) {
+			APB_READ(DOWNLOAD_GET_REG, get);
+			if ((put - get) <=
+			    (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE))
+				break;
+			mdelay(i);
+		}
+
+		if ((put - get) > (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) {
+			pr_err("Timeout waiting for FIFO.\n");
+			ret = -ETIMEDOUT;
+			goto error;
+		}
+
+		/* calculate the block size */
+		tx_size = block_size = min((size_t)(firmware->size - put),
+			(size_t)DOWNLOAD_BLOCK_SIZE);
+
+		memcpy(buf, &firmware->data[put], block_size);
+		if (block_size < DOWNLOAD_BLOCK_SIZE) {
+			memset(&buf[block_size], 0,
+			       DOWNLOAD_BLOCK_SIZE - block_size);
+			tx_size = DOWNLOAD_BLOCK_SIZE;
+		}
+
+		/* send the block to sram */
+		ret = cw1200_apb_write(priv,
+			CW1200_APB(DOWNLOAD_FIFO_OFFSET +
+				   (put & (DOWNLOAD_FIFO_SIZE - 1))),
+			buf, tx_size);
+		if (ret < 0) {
+			pr_err("Can't write firmware block @ %d!\n",
+			       put & (DOWNLOAD_FIFO_SIZE - 1));
+			goto error;
+		}
+
+		/* update the put register */
+		put += block_size;
+		APB_WRITE(DOWNLOAD_PUT_REG, put);
+	} /* End of firmware download loop */
+
+	/* Wait for the download completion */
+	for (i = 0; i < 300; i += 1 + i / 2) {
+		APB_READ(DOWNLOAD_STATUS_REG, val32);
+		if (val32 != DOWNLOAD_PENDING)
+			break;
+		mdelay(i);
+	}
+	if (val32 != DOWNLOAD_SUCCESS) {
+		pr_err("Wait for download completion failed: 0x%.8X\n", val32);
+		ret = -ETIMEDOUT;
+		goto error;
+	} else {
+		pr_info("Firmware download completed.\n");
+		ret = 0;
+	}
+
+error:
+	kfree(buf);
+	if (firmware)
+		release_firmware(firmware);
+	return ret;
+
+#undef APB_WRITE
+#undef APB_READ
+#undef REG_WRITE
+#undef REG_READ
+}
+
+
+static int config_reg_read(struct cw1200_common *priv, u32 *val)
+{
+	switch (priv->hw_type) {
+	case HIF_9000_SILICON_VERSATILE: {
+		u16 val16;
+		int ret = cw1200_reg_read_16(priv,
+					     ST90TDS_CONFIG_REG_ID,
+					     &val16);
+		if (ret < 0)
+			return ret;
+		*val = val16;
+		return 0;
+	}
+	case HIF_8601_VERSATILE:
+	case HIF_8601_SILICON:
+	default:
+		return cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, val);
+	}
+}
+
+static int config_reg_write(struct cw1200_common *priv, u32 val)
+{
+	switch (priv->hw_type) {
+	case HIF_9000_SILICON_VERSATILE:
+		return cw1200_reg_write_16(priv,
+					   ST90TDS_CONFIG_REG_ID,
+					   (u16)val);
+	case HIF_8601_VERSATILE:
+	case HIF_8601_SILICON:
+	default:
+		return cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val);
+	}
+}
+
+int cw1200_load_firmware(struct cw1200_common *priv)
+{
+	int ret;
+	int i;
+	u32 val32;
+	u16 val16;
+	int major_revision = -1;
+
+	/* Read CONFIG Register */
+	ret = cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+	if (ret < 0) {
+		pr_err("Can't read config register.\n");
+		goto out;
+	}
+
+	if 
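/* all-zeros or all-ones usually means the bus or device is absent */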
(val32 == 0 || val32 == 0xffffffff) { + pr_err("Bad config register value (0x%08x)\n", val32); + ret = -EIO; + goto out; + } + + priv->hw_type = cw1200_get_hw_type(val32, &major_revision); + if (priv->hw_type < 0) { + pr_err("Can't deduce hardware type.\n"); + ret = -ENOTSUPP; + goto out; + } + + /* Set DPLL Reg value, and read back to confirm writes work */ + ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, + cw1200_dpll_from_clk(priv->hw_refclk)); + if (ret < 0) { + pr_err("Can't write DPLL register.\n"); + goto out; + } + + msleep(20); + + ret = cw1200_reg_read_32(priv, + ST90TDS_TSET_GEN_R_W_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't read DPLL register.\n"); + goto out; + } + + if (val32 != cw1200_dpll_from_clk(priv->hw_refclk)) { + pr_err("Unable to initialise DPLL register. Wrote 0x%.8X, Read 0x%.8X.\n", + cw1200_dpll_from_clk(priv->hw_refclk), val32); + ret = -EIO; + goto out; + } + + /* Set wakeup bit in device */ + ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, &val16); + if (ret < 0) { + pr_err("set_wakeup: can't read control register.\n"); + goto out; + } + + ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, + val16 | ST90TDS_CONT_WUP_BIT); + if (ret < 0) { + pr_err("set_wakeup: can't write control register.\n"); + goto out; + } + + /* Wait for wakeup */ + for (i = 0; i < 300; i += (1 + i / 2)) { + ret = cw1200_reg_read_16(priv, + ST90TDS_CONTROL_REG_ID, &val16); + if (ret < 0) { + pr_err("wait_for_wakeup: can't read control register.\n"); + goto out; + } + + if (val16 & ST90TDS_CONT_RDY_BIT) + break; + + msleep(i); + } + + if ((val16 & ST90TDS_CONT_RDY_BIT) == 0) { + pr_err("wait_for_wakeup: device is not responding.\n"); + ret = -ETIMEDOUT; + goto out; + } + + switch (major_revision) { + case 1: + /* CW1200 Hardware detection logic : Check for CUT1.1 */ + ret = cw1200_ahb_read_32(priv, CW1200_CUT_ID_ADDR, &val32); + if (ret) { + pr_err("HW detection: can't read CUT ID.\n"); + goto out; + } + + switch (val32) { + case CW1200_CUT_11_ID_STR: + pr_info("CW1x00 Cut 1.1 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT11; + break; + default: + pr_info("CW1x00 Cut 1.0 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT10; + break; + } + + /* According to ST-E, CUT<2.0 has busted BA TID0-3. + Just disable it entirely... 
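+	   (deliberately overriding the cw1200_ba_*_tids module parameters
+	   for these old cuts, since this runs after cw1200_init_common)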
+ */ + priv->ba_rx_tid_mask = 0; + priv->ba_tx_tid_mask = 0; + break; + case 2: { + u32 ar1, ar2, ar3; + ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR, &ar1); + if (ret) { + pr_err("(1) HW detection: can't read CUT ID\n"); + goto out; + } + ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 4, &ar2); + if (ret) { + pr_err("(2) HW detection: can't read CUT ID.\n"); + goto out; + } + + ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 8, &ar3); + if (ret) { + pr_err("(3) HW detection: can't read CUT ID.\n"); + goto out; + } + + if (ar1 == CW1200_CUT_22_ID_STR1 && + ar2 == CW1200_CUT_22_ID_STR2 && + ar3 == CW1200_CUT_22_ID_STR3) { + pr_info("CW1x00 Cut 2.2 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT22; + } else { + pr_info("CW1x00 Cut 2.0 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT20; + } + break; + } + case 4: + pr_info("CW1x60 silicon detected.\n"); + priv->hw_revision = CW1X60_HW_REV; + break; + default: + pr_err("Unsupported silicon major revision %d.\n", + major_revision); + ret = -ENOTSUPP; + goto out; + } + + /* Checking for access mode */ + ret = config_reg_read(priv, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + goto out; + } + + if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) { + pr_err("Device is already in QUEUE mode!\n"); + ret = -EINVAL; + goto out; + } + + switch (priv->hw_type) { + case HIF_8601_SILICON: + if (priv->hw_revision == CW1X60_HW_REV) { + pr_err("Can't handle CW1160/1260 firmware load yet.\n"); + ret = -ENOTSUPP; + goto out; + } + ret = cw1200_load_firmware_cw1200(priv); + break; + default: + pr_err("Can't perform firmware load for hw type %d.\n", + priv->hw_type); + ret = -ENOTSUPP; + goto out; + } + if (ret < 0) { + pr_err("Firmware load error.\n"); + goto out; + } + + /* Enable interrupt signalling */ + priv->hwbus_ops->lock(priv->hwbus_priv); + ret = __cw1200_irq_enable(priv, 1); + priv->hwbus_ops->unlock(priv->hwbus_priv); + if (ret < 0) + goto unsubscribe; + + /* Configure device for MESSSAGE MODE */ + ret = config_reg_read(priv, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + goto unsubscribe; + } + ret = config_reg_write(priv, val32 & ~ST90TDS_CONFIG_ACCESS_MODE_BIT); + if (ret < 0) { + pr_err("Can't write config register.\n"); + goto unsubscribe; + } + + /* Unless we read the CONFIG Register we are + * not able to get an interrupt + */ + mdelay(10); + config_reg_read(priv, &val32); + +out: + return ret; + +unsubscribe: + /* Disable interrupt signalling */ + priv->hwbus_ops->lock(priv->hwbus_priv); + ret = __cw1200_irq_enable(priv, 0); + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} diff --git a/drivers/net/wireless/cw1200/fwio.h b/drivers/net/wireless/cw1200/fwio.h new file mode 100644 index 000000000000..ea3099362cdf --- /dev/null +++ b/drivers/net/wireless/cw1200/fwio.h @@ -0,0 +1,39 @@ +/* + * Firmware API for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on: + * ST-Ericsson UMAC CW1200 driver which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh <ajitpal.singh@stericsson.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef FWIO_H_INCLUDED +#define FWIO_H_INCLUDED + +#define BOOTLOADER_CW1X60 "boot_cw1x60.bin" +#define FIRMWARE_CW1X60 "wsm_cw1x60.bin" +#define FIRMWARE_CUT22 "wsm_22.bin" +#define FIRMWARE_CUT20 "wsm_20.bin" +#define FIRMWARE_CUT11 "wsm_11.bin" +#define FIRMWARE_CUT10 "wsm_10.bin" +#define SDD_FILE_CW1X60 "sdd_cw1x60.bin" +#define SDD_FILE_22 "sdd_22.bin" +#define SDD_FILE_20 "sdd_20.bin" +#define SDD_FILE_11 "sdd_11.bin" +#define SDD_FILE_10 "sdd_10.bin" + +int cw1200_load_firmware(struct cw1200_common *priv); + +/* SDD definitions */ +#define SDD_PTA_CFG_ELT_ID 0xEB +#define SDD_REFERENCE_FREQUENCY_ELT_ID 0xc5 +u32 cw1200_dpll_from_clk(u16 clk); + +#endif diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h new file mode 100644 index 000000000000..8b2fc831c3de --- /dev/null +++ b/drivers/net/wireless/cw1200/hwbus.h @@ -0,0 +1,33 @@ +/* + * Common hwbus abstraction layer interface for cw1200 wireless driver + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_HWBUS_H +#define CW1200_HWBUS_H + +struct hwbus_priv; + +void cw1200_irq_handler(struct cw1200_common *priv); + +/* This MUST be wrapped with hwbus_ops->lock/unlock! */ +int __cw1200_irq_enable(struct cw1200_common *priv, int enable); + +struct hwbus_ops { + int (*hwbus_memcpy_fromio)(struct hwbus_priv *self, unsigned int addr, + void *dst, int count); + int (*hwbus_memcpy_toio)(struct hwbus_priv *self, unsigned int addr, + const void *src, int count); + void (*lock)(struct hwbus_priv *self); + void (*unlock)(struct hwbus_priv *self); + size_t (*align_size)(struct hwbus_priv *self, size_t size); + int (*power_mgmt)(struct hwbus_priv *self, bool suspend); +}; + +#endif /* CW1200_HWBUS_H */ diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c new file mode 100644 index 000000000000..dad3fb331818 --- /dev/null +++ b/drivers/net/wireless/cw1200/hwio.c @@ -0,0 +1,310 @@ +/* + * Low-level device IO routines for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on: + * ST-Ericsson UMAC CW1200 driver, which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh <ajitpal.singh@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/types.h> + +#include "cw1200.h" +#include "hwio.h" +#include "hwbus.h" + + /* Sdio addr is 4*spi_addr */ +#define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2) +#define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \ + ((((buf_id) & 0x1F) << 7) \ + | (((mpf) & 1) << 6) \ + | (((rfu) & 1) << 5) \ + | (((reg_id_ofs) & 0x1F) << 0)) +#define MAX_RETRY 3 + + +static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr, + void *buf, size_t buf_len, int buf_id) +{ + u16 addr_sdio; + u32 sdio_reg_addr_17bit; + + /* Check if buffer is aligned to 4 byte boundary */ + if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) { + pr_err("buffer is not aligned.\n"); + return -EINVAL; + } + + /* Convert to SDIO Register Address */ + addr_sdio = SPI_REG_ADDR_TO_SDIO(addr); + sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio); + + return priv->hwbus_ops->hwbus_memcpy_fromio(priv->hwbus_priv, + sdio_reg_addr_17bit, + buf, buf_len); +} + +static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr, + const void *buf, size_t buf_len, int buf_id) +{ + u16 addr_sdio; + u32 sdio_reg_addr_17bit; + + /* Convert to SDIO Register Address */ + addr_sdio = SPI_REG_ADDR_TO_SDIO(addr); + sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio); + + return priv->hwbus_ops->hwbus_memcpy_toio(priv->hwbus_priv, + sdio_reg_addr_17bit, + buf, buf_len); +} + +static inline int __cw1200_reg_read_32(struct cw1200_common *priv, + u16 addr, u32 *val) +{ + int i = __cw1200_reg_read(priv, addr, val, sizeof(*val), 0); + *val = le32_to_cpu(*val); + return i; +} + +static inline int __cw1200_reg_write_32(struct cw1200_common *priv, + u16 addr, u32 val) +{ + val = cpu_to_le32(val); + return __cw1200_reg_write(priv, addr, &val, sizeof(val), 0); +} + +static inline int __cw1200_reg_read_16(struct cw1200_common *priv, + u16 addr, u16 *val) +{ + int i = __cw1200_reg_read(priv, addr, val, sizeof(*val), 0); + *val = le16_to_cpu(*val); + return i; +} + +static inline int __cw1200_reg_write_16(struct cw1200_common *priv, + u16 addr, u16 val) +{ + val = cpu_to_le16(val); + return __cw1200_reg_write(priv, addr, &val, sizeof(val), 0); +} + +int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf, + size_t buf_len) +{ + int ret; + priv->hwbus_ops->lock(priv->hwbus_priv); + ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0); + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf, + size_t buf_len) +{ + int ret; + priv->hwbus_ops->lock(priv->hwbus_priv); + ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0); + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len) +{ + int ret, retry = 1; + int buf_id_rx = priv->buf_id_rx; + + priv->hwbus_ops->lock(priv->hwbus_priv); + + while (retry <= MAX_RETRY) { + ret = __cw1200_reg_read(priv, + ST90TDS_IN_OUT_QUEUE_REG_ID, buf, + buf_len, buf_id_rx + 1); + if (!ret) { + buf_id_rx = (buf_id_rx + 1) & 3; + priv->buf_id_rx = buf_id_rx; + break; + } else { + retry++; + mdelay(1); + pr_err("error :[%d]\n", ret); + } + } + + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int cw1200_data_write(struct cw1200_common *priv, const void *buf, + size_t buf_len) +{ + int ret, retry = 1; + int buf_id_tx = priv->buf_id_tx; + + priv->hwbus_ops->lock(priv->hwbus_priv); + + while (retry <= MAX_RETRY) { + ret = __cw1200_reg_write(priv, + ST90TDS_IN_OUT_QUEUE_REG_ID, buf, + 
buf_len, buf_id_tx); + if (!ret) { + buf_id_tx = (buf_id_tx + 1) & 31; + priv->buf_id_tx = buf_id_tx; + break; + } else { + retry++; + mdelay(1); + pr_err("error :[%d]\n", ret); + } + } + + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf, + size_t buf_len, u32 prefetch, u16 port_addr) +{ + u32 val32 = 0; + int i, ret; + + if ((buf_len / 2) >= 0x1000) { + pr_err("Can't read more than 0xfff words.\n"); + return -EINVAL; + } + + priv->hwbus_ops->lock(priv->hwbus_priv); + /* Write address */ + ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr); + if (ret < 0) { + pr_err("Can't write address register.\n"); + goto out; + } + + /* Read CONFIG Register Value - We will read 32 bits */ + ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + goto out; + } + + /* Set PREFETCH bit */ + ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, + val32 | prefetch); + if (ret < 0) { + pr_err("Can't write prefetch bit.\n"); + goto out; + } + + /* Check for PRE-FETCH bit to be cleared */ + for (i = 0; i < 20; i++) { + ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't check prefetch bit.\n"); + goto out; + } + if (!(val32 & prefetch)) + break; + + mdelay(i); + } + + if (val32 & prefetch) { + pr_err("Prefetch bit is not cleared.\n"); + goto out; + } + + /* Read data port */ + ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0); + if (ret < 0) { + pr_err("Can't read data port.\n"); + goto out; + } + +out: + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf, + size_t buf_len) +{ + int ret; + + if ((buf_len / 2) >= 0x1000) { + pr_err("Can't write more than 0xfff words.\n"); + return -EINVAL; + } + + priv->hwbus_ops->lock(priv->hwbus_priv); + + /* Write address */ + ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr); + if (ret < 0) { + pr_err("Can't write address register.\n"); + goto out; + } + + /* Write data port */ + ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID, + buf, buf_len, 0); + if (ret < 0) { + pr_err("Can't write data port.\n"); + goto out; + } + +out: + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int __cw1200_irq_enable(struct cw1200_common *priv, int enable) +{ + u32 val32; + u16 val16; + int ret; + + if (HIF_8601_SILICON == priv->hw_type) { + ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + return ret; + } + + if (enable) + val32 |= ST90TDS_CONF_IRQ_RDY_ENABLE; + else + val32 &= ~ST90TDS_CONF_IRQ_RDY_ENABLE; + + ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32); + if (ret < 0) { + pr_err("Can't write config register.\n"); + return ret; + } + } else { + ret = __cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16); + if (ret < 0) { + pr_err("Can't read control register.\n"); + return ret; + } + + if (enable) + val16 |= ST90TDS_CONT_IRQ_RDY_ENABLE; + else + val16 &= ~ST90TDS_CONT_IRQ_RDY_ENABLE; + + ret = __cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, val16); + if (ret < 0) { + pr_err("Can't write control register.\n"); + return ret; + } + } + return 0; +} diff --git a/drivers/net/wireless/cw1200/hwio.h b/drivers/net/wireless/cw1200/hwio.h new file mode 100644 index 000000000000..563329cfead6 --- /dev/null +++ 
b/drivers/net/wireless/cw1200/hwio.h
@@ -0,0 +1,246 @@
+/*
+ * Low-level API for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_HWIO_H_INCLUDED
+#define CW1200_HWIO_H_INCLUDED
+
+/* extern */ struct cw1200_common;
+
+#define CW1200_CUT_11_ID_STR		(0x302E3830)
+#define CW1200_CUT_22_ID_STR1		(0x302e3132)
+#define CW1200_CUT_22_ID_STR2		(0x32302e30)
+#define CW1200_CUT_22_ID_STR3		(0x3335)
+#define CW1200_CUT_ID_ADDR		(0xFFF17F90)
+#define CW1200_CUT2_ID_ADDR		(0xFFF1FF90)
+
+/* Download control area */
+/* boot loader start address in SRAM */
+#define DOWNLOAD_BOOT_LOADER_OFFSET	(0x00000000)
+/* 32K, 0x4000 to 0xBFFF */
+#define DOWNLOAD_FIFO_OFFSET		(0x00004000)
+/* 32K */
+#define DOWNLOAD_FIFO_SIZE		(0x00008000)
+/* 128 bytes, 0xFF80 to 0xFFFF */
+#define DOWNLOAD_CTRL_OFFSET		(0x0000FF80)
+#define DOWNLOAD_CTRL_DATA_DWORDS	(32-6)
+
+struct download_cntl_t {
+	/* size of whole firmware file (including Checksum), host init */
+	u32 image_size;
+	/* downloading flags */
+	u32 flags;
+	/* No. of bytes put into the download, init & updated by host */
+	u32 put;
+	/* last traced program counter, last ARM reg_pc */
+	u32 trace_pc;
+	/* No. of bytes read from the download, host init, device updates */
+	u32 get;
+	/* r0, boot loader status, host init to pending, device updates */
+	u32 status;
+	/* Extra debug info, r1 to r14 if status=r0=DOWNLOAD_EXCEPTION */
+	u32 debug_data[DOWNLOAD_CTRL_DATA_DWORDS];
+};
+
+#define DOWNLOAD_IMAGE_SIZE_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, image_size))
+#define DOWNLOAD_FLAGS_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, flags))
+#define DOWNLOAD_PUT_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, put))
+#define DOWNLOAD_TRACE_PC_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, trace_pc))
+#define DOWNLOAD_GET_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, get))
+#define DOWNLOAD_STATUS_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, status))
+#define DOWNLOAD_DEBUG_DATA_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, debug_data))
+#define DOWNLOAD_DEBUG_DATA_LEN		(108)
+
+#define DOWNLOAD_BLOCK_SIZE		(1024)
+
+/* For boot loader detection */
+#define DOWNLOAD_ARE_YOU_HERE		(0x87654321)
+#define DOWNLOAD_I_AM_HERE		(0x12345678)
+
+/* Download error code */
+#define DOWNLOAD_PENDING		(0xFFFFFFFF)
+#define DOWNLOAD_SUCCESS		(0)
+#define DOWNLOAD_EXCEPTION		(1)
+#define DOWNLOAD_ERR_MEM_1		(2)
+#define DOWNLOAD_ERR_MEM_2		(3)
+#define DOWNLOAD_ERR_SOFTWARE		(4)
+#define DOWNLOAD_ERR_FILE_SIZE		(5)
+#define DOWNLOAD_ERR_CHECKSUM		(6)
+#define DOWNLOAD_ERR_OVERFLOW		(7)
+#define DOWNLOAD_ERR_IMAGE		(8)
+#define DOWNLOAD_ERR_HOST		(9)
+#define DOWNLOAD_ERR_ABORT		(10)
+
+
+#define SYS_BASE_ADDR_SILICON		(0)
+#define PAC_BASE_ADDRESS_SILICON	(SYS_BASE_ADDR_SILICON + 0x09000000)
+#define PAC_SHARED_MEMORY_SILICON	(PAC_BASE_ADDRESS_SILICON)
+
+#define CW1200_APB(addr)		(PAC_SHARED_MEMORY_SILICON + (addr))
+
+/* Device register definitions */
+
+/* WBF - SPI Register Addresses */
+#define ST90TDS_ADDR_ID_BASE		
(0x0000) +/* 16/32 bits */ +#define ST90TDS_CONFIG_REG_ID (0x0000) +/* 16/32 bits */ +#define ST90TDS_CONTROL_REG_ID (0x0001) +/* 16 bits, Q mode W/R */ +#define ST90TDS_IN_OUT_QUEUE_REG_ID (0x0002) +/* 32 bits, AHB bus R/W */ +#define ST90TDS_AHB_DPORT_REG_ID (0x0003) +/* 16/32 bits */ +#define ST90TDS_SRAM_BASE_ADDR_REG_ID (0x0004) +/* 32 bits, APB bus R/W */ +#define ST90TDS_SRAM_DPORT_REG_ID (0x0005) +/* 32 bits, t_settle/general */ +#define ST90TDS_TSET_GEN_R_W_REG_ID (0x0006) +/* 16 bits, Q mode read, no length */ +#define ST90TDS_FRAME_OUT_REG_ID (0x0007) +#define ST90TDS_ADDR_ID_MAX (ST90TDS_FRAME_OUT_REG_ID) + +/* WBF - Control register bit set */ +/* next o/p length, bit 11 to 0 */ +#define ST90TDS_CONT_NEXT_LEN_MASK (0x0FFF) +#define ST90TDS_CONT_WUP_BIT (BIT(12)) +#define ST90TDS_CONT_RDY_BIT (BIT(13)) +#define ST90TDS_CONT_IRQ_ENABLE (BIT(14)) +#define ST90TDS_CONT_RDY_ENABLE (BIT(15)) +#define ST90TDS_CONT_IRQ_RDY_ENABLE (BIT(14)|BIT(15)) + +/* SPI Config register bit set */ +#define ST90TDS_CONFIG_FRAME_BIT (BIT(2)) +#define ST90TDS_CONFIG_WORD_MODE_BITS (BIT(3)|BIT(4)) +#define ST90TDS_CONFIG_WORD_MODE_1 (BIT(3)) +#define ST90TDS_CONFIG_WORD_MODE_2 (BIT(4)) +#define ST90TDS_CONFIG_ERROR_0_BIT (BIT(5)) +#define ST90TDS_CONFIG_ERROR_1_BIT (BIT(6)) +#define ST90TDS_CONFIG_ERROR_2_BIT (BIT(7)) +/* TBD: Sure??? */ +#define ST90TDS_CONFIG_CSN_FRAME_BIT (BIT(7)) +#define ST90TDS_CONFIG_ERROR_3_BIT (BIT(8)) +#define ST90TDS_CONFIG_ERROR_4_BIT (BIT(9)) +/* QueueM */ +#define ST90TDS_CONFIG_ACCESS_MODE_BIT (BIT(10)) +/* AHB bus */ +#define ST90TDS_CONFIG_AHB_PRFETCH_BIT (BIT(11)) +#define ST90TDS_CONFIG_CPU_CLK_DIS_BIT (BIT(12)) +/* APB bus */ +#define ST90TDS_CONFIG_PRFETCH_BIT (BIT(13)) +/* cpu reset */ +#define ST90TDS_CONFIG_CPU_RESET_BIT (BIT(14)) +#define ST90TDS_CONFIG_CLEAR_INT_BIT (BIT(15)) + +/* For CW1200 the IRQ Enable and Ready Bits are in CONFIG register */ +#define ST90TDS_CONF_IRQ_ENABLE (BIT(16)) +#define ST90TDS_CONF_RDY_ENABLE (BIT(17)) +#define ST90TDS_CONF_IRQ_RDY_ENABLE (BIT(16)|BIT(17)) + +int cw1200_data_read(struct cw1200_common *priv, + void *buf, size_t buf_len); +int cw1200_data_write(struct cw1200_common *priv, + const void *buf, size_t buf_len); + +int cw1200_reg_read(struct cw1200_common *priv, u16 addr, + void *buf, size_t buf_len); +int cw1200_reg_write(struct cw1200_common *priv, u16 addr, + const void *buf, size_t buf_len); + +static inline int cw1200_reg_read_16(struct cw1200_common *priv, + u16 addr, u16 *val) +{ + u32 tmp; + int i; + i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp)); + tmp = le32_to_cpu(tmp); + *val = tmp & 0xffff; + return i; +} + +static inline int cw1200_reg_write_16(struct cw1200_common *priv, + u16 addr, u16 val) +{ + u32 tmp = val; + tmp = cpu_to_le32(tmp); + return cw1200_reg_write(priv, addr, &tmp, sizeof(tmp)); +} + +static inline int cw1200_reg_read_32(struct cw1200_common *priv, + u16 addr, u32 *val) +{ + int i = cw1200_reg_read(priv, addr, val, sizeof(*val)); + *val = le32_to_cpu(*val); + return i; +} + +static inline int cw1200_reg_write_32(struct cw1200_common *priv, + u16 addr, u32 val) +{ + val = cpu_to_le32(val); + return cw1200_reg_write(priv, addr, &val, sizeof(val)); +} + +int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf, + size_t buf_len, u32 prefetch, u16 port_addr); +int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf, + size_t buf_len); + +static inline int cw1200_apb_read(struct cw1200_common *priv, u32 addr, + void *buf, size_t buf_len) +{ + return 
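/* APB access = indirect read through the SRAM data port */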
cw1200_indirect_read(priv, addr, buf, buf_len, + ST90TDS_CONFIG_PRFETCH_BIT, + ST90TDS_SRAM_DPORT_REG_ID); +} + +static inline int cw1200_ahb_read(struct cw1200_common *priv, u32 addr, + void *buf, size_t buf_len) +{ + return cw1200_indirect_read(priv, addr, buf, buf_len, + ST90TDS_CONFIG_AHB_PRFETCH_BIT, + ST90TDS_AHB_DPORT_REG_ID); +} + +static inline int cw1200_apb_read_32(struct cw1200_common *priv, + u32 addr, u32 *val) +{ + int i = cw1200_apb_read(priv, addr, val, sizeof(*val)); + *val = le32_to_cpu(*val); + return i; +} + +static inline int cw1200_apb_write_32(struct cw1200_common *priv, + u32 addr, u32 val) +{ + val = cpu_to_le32(val); + return cw1200_apb_write(priv, addr, &val, sizeof(val)); +} +static inline int cw1200_ahb_read_32(struct cw1200_common *priv, + u32 addr, u32 *val) +{ + int i = cw1200_ahb_read(priv, addr, val, sizeof(*val)); + *val = le32_to_cpu(*val); + return i; +} + +#endif /* CW1200_HWIO_H_INCLUDED */ diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c new file mode 100644 index 000000000000..da885036ca5f --- /dev/null +++ b/drivers/net/wireless/cw1200/main.c @@ -0,0 +1,605 @@ +/* + * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on: + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de> + * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> + * + * Based on: + * - the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. + * - stlc45xx driver + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/vmalloc.h> +#include <linux/random.h> +#include <linux/sched.h> +#include <net/mac80211.h> + +#include "cw1200.h" +#include "txrx.h" +#include "hwbus.h" +#include "fwio.h" +#include "hwio.h" +#include "bh.h" +#include "sta.h" +#include "scan.h" +#include "debug.h" +#include "pm.h" + +MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>"); +MODULE_DESCRIPTION("Softmac ST-Ericsson CW1200 common code"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("cw1200_core"); + +/* Accept MAC address of the form macaddr=0x00,0x80,0xE1,0x30,0x40,0x50 */ +static u8 cw1200_mac_template[ETH_ALEN] = {0x02, 0x80, 0xe1, 0x00, 0x00, 0x00}; +module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, S_IRUGO); +MODULE_PARM_DESC(macaddr, "Override platform_data MAC address"); + +static char *cw1200_sdd_path; +module_param(cw1200_sdd_path, charp, 0644); +MODULE_PARM_DESC(cw1200_sdd_path, "Override platform_data SDD file"); +static int cw1200_refclk; +module_param(cw1200_refclk, int, 0644); +MODULE_PARM_DESC(cw1200_refclk, "Override platform_data reference clock"); + +int cw1200_power_mode = wsm_power_mode_quiescent; +module_param(cw1200_power_mode, int, 0644); +MODULE_PARM_DESC(cw1200_power_mode, "WSM power mode. 
0 == active, 1 == doze, 2 == quiescent (default)"); + +#define RATETAB_ENT(_rate, _rateid, _flags) \ + { \ + .bitrate = (_rate), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } + +static struct ieee80211_rate cw1200_rates[] = { + RATETAB_ENT(10, 0, 0), + RATETAB_ENT(20, 1, 0), + RATETAB_ENT(55, 2, 0), + RATETAB_ENT(110, 3, 0), + RATETAB_ENT(60, 6, 0), + RATETAB_ENT(90, 7, 0), + RATETAB_ENT(120, 8, 0), + RATETAB_ENT(180, 9, 0), + RATETAB_ENT(240, 10, 0), + RATETAB_ENT(360, 11, 0), + RATETAB_ENT(480, 12, 0), + RATETAB_ENT(540, 13, 0), +}; + +static struct ieee80211_rate cw1200_mcs_rates[] = { + RATETAB_ENT(65, 14, IEEE80211_TX_RC_MCS), + RATETAB_ENT(130, 15, IEEE80211_TX_RC_MCS), + RATETAB_ENT(195, 16, IEEE80211_TX_RC_MCS), + RATETAB_ENT(260, 17, IEEE80211_TX_RC_MCS), + RATETAB_ENT(390, 18, IEEE80211_TX_RC_MCS), + RATETAB_ENT(520, 19, IEEE80211_TX_RC_MCS), + RATETAB_ENT(585, 20, IEEE80211_TX_RC_MCS), + RATETAB_ENT(650, 21, IEEE80211_TX_RC_MCS), +}; + +#define cw1200_a_rates (cw1200_rates + 4) +#define cw1200_a_rates_size (ARRAY_SIZE(cw1200_rates) - 4) +#define cw1200_g_rates (cw1200_rates + 0) +#define cw1200_g_rates_size (ARRAY_SIZE(cw1200_rates)) +#define cw1200_n_rates (cw1200_mcs_rates) +#define cw1200_n_rates_size (ARRAY_SIZE(cw1200_mcs_rates)) + + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _flags) { \ + .band = IEEE80211_BAND_5GHZ, \ + .center_freq = 5000 + (5 * (_channel)), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static struct ieee80211_channel cw1200_2ghz_chantable[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +static struct ieee80211_channel cw1200_5ghz_chantable[] = { + CHAN5G(34, 0), CHAN5G(36, 0), + CHAN5G(38, 0), CHAN5G(40, 0), + CHAN5G(42, 0), CHAN5G(44, 0), + CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(149, 0), + CHAN5G(153, 0), CHAN5G(157, 0), + CHAN5G(161, 0), CHAN5G(165, 0), + CHAN5G(184, 0), CHAN5G(188, 0), + CHAN5G(192, 0), CHAN5G(196, 0), + CHAN5G(200, 0), CHAN5G(204, 0), + CHAN5G(208, 0), CHAN5G(212, 0), + CHAN5G(216, 0), +}; + +static struct ieee80211_supported_band cw1200_band_2ghz = { + .channels = cw1200_2ghz_chantable, + .n_channels = ARRAY_SIZE(cw1200_2ghz_chantable), + .bitrates = cw1200_g_rates, + .n_bitrates = cw1200_g_rates_size, + .ht_cap = { + .cap = IEEE80211_HT_CAP_GRN_FLD | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | + IEEE80211_HT_CAP_MAX_AMSDU, + .ht_supported = 1, + .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE, + .mcs = { + .rx_mask[0] = 0xFF, + .rx_highest = __cpu_to_le16(0x41), + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, + }, + }, +}; + +static struct ieee80211_supported_band cw1200_band_5ghz = { + .channels = cw1200_5ghz_chantable, + .n_channels = ARRAY_SIZE(cw1200_5ghz_chantable), + .bitrates = cw1200_a_rates, + .n_bitrates = 
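/* OFDM-only subset: cw1200_a_rates skips the four DSSS entries */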
cw1200_a_rates_size, + .ht_cap = { + .cap = IEEE80211_HT_CAP_GRN_FLD | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | + IEEE80211_HT_CAP_MAX_AMSDU, + .ht_supported = 1, + .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE, + .mcs = { + .rx_mask[0] = 0xFF, + .rx_highest = __cpu_to_le16(0x41), + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, + }, + }, +}; + +static const unsigned long cw1200_ttl[] = { + 1 * HZ, /* VO */ + 2 * HZ, /* VI */ + 5 * HZ, /* BE */ + 10 * HZ /* BK */ +}; + +static const struct ieee80211_ops cw1200_ops = { + .start = cw1200_start, + .stop = cw1200_stop, + .add_interface = cw1200_add_interface, + .remove_interface = cw1200_remove_interface, + .change_interface = cw1200_change_interface, + .tx = cw1200_tx, + .hw_scan = cw1200_hw_scan, + .set_tim = cw1200_set_tim, + .sta_notify = cw1200_sta_notify, + .sta_add = cw1200_sta_add, + .sta_remove = cw1200_sta_remove, + .set_key = cw1200_set_key, + .set_rts_threshold = cw1200_set_rts_threshold, + .config = cw1200_config, + .bss_info_changed = cw1200_bss_info_changed, + .prepare_multicast = cw1200_prepare_multicast, + .configure_filter = cw1200_configure_filter, + .conf_tx = cw1200_conf_tx, + .get_stats = cw1200_get_stats, + .ampdu_action = cw1200_ampdu_action, + .flush = cw1200_flush, +#ifdef CONFIG_PM + .suspend = cw1200_wow_suspend, + .resume = cw1200_wow_resume, +#endif + /* Intentionally not offloaded: */ + /*.channel_switch = cw1200_channel_switch, */ + /*.remain_on_channel = cw1200_remain_on_channel, */ + /*.cancel_remain_on_channel = cw1200_cancel_remain_on_channel, */ +}; + +int cw1200_ba_rx_tids = -1; +int cw1200_ba_tx_tids = -1; +module_param(cw1200_ba_rx_tids, int, 0644); +module_param(cw1200_ba_tx_tids, int, 0644); +MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs"); +MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs"); + +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support cw1200_wowlan_support = { + /* Support only for limited wowlan functionalities */ + .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT, +}; +#endif + + +static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, + const bool have_5ghz) +{ + int i, band; + struct ieee80211_hw *hw; + struct cw1200_common *priv; + + hw = ieee80211_alloc_hw(sizeof(struct cw1200_common), &cw1200_ops); + if (!hw) + return NULL; + + priv = hw->priv; + priv->hw = hw; + priv->hw_type = -1; + priv->mode = NL80211_IFTYPE_UNSPECIFIED; + priv->rates = cw1200_rates; /* TODO: fetch from FW */ + priv->mcs_rates = cw1200_n_rates; + if (cw1200_ba_rx_tids != -1) + priv->ba_rx_tid_mask = cw1200_ba_rx_tids; + else + priv->ba_rx_tid_mask = 0xFF; /* Enable RX BLKACK for all TIDs */ + if (cw1200_ba_tx_tids != -1) + priv->ba_tx_tid_mask = cw1200_ba_tx_tids; + else + priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */ + + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS | + IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_SUPPORTS_UAPSD | + IEEE80211_HW_CONNECTION_MONITOR | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_TX_AMPDU_SETUP_IN_HW | + IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC; + + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); + +#ifdef CONFIG_PM + hw->wiphy->wowlan = &cw1200_wowlan_support; +#endif + + hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + + hw->channel_change_time = 1000; /* TODO: 
find actual value */ + hw->queues = 4; + + priv->rts_threshold = -1; + + hw->max_rates = 8; + hw->max_rate_tries = 15; + hw->extra_tx_headroom = WSM_TX_EXTRA_HEADROOM + + 8; /* TKIP IV */ + + hw->sta_data_size = sizeof(struct cw1200_sta_priv); + + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz; + if (have_5ghz) + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz; + + /* Channel params have to be cleared before registering wiphy again */ + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; + if (!sband) + continue; + for (i = 0; i < sband->n_channels; i++) { + sband->channels[i].flags = 0; + sband->channels[i].max_antenna_gain = 0; + sband->channels[i].max_power = 30; + } + } + + hw->wiphy->max_scan_ssids = 2; + hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + + if (macaddr) + SET_IEEE80211_PERM_ADDR(hw, (u8 *)macaddr); + else + SET_IEEE80211_PERM_ADDR(hw, cw1200_mac_template); + + /* Fix up mac address if necessary */ + if (hw->wiphy->perm_addr[3] == 0 && + hw->wiphy->perm_addr[4] == 0 && + hw->wiphy->perm_addr[5] == 0) { + get_random_bytes(&hw->wiphy->perm_addr[3], 3); + } + + mutex_init(&priv->wsm_cmd_mux); + mutex_init(&priv->conf_mutex); + priv->workqueue = create_singlethread_workqueue("cw1200_wq"); + sema_init(&priv->scan.lock, 1); + INIT_WORK(&priv->scan.work, cw1200_scan_work); + INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work); + INIT_DELAYED_WORK(&priv->scan.timeout, cw1200_scan_timeout); + INIT_DELAYED_WORK(&priv->clear_recent_scan_work, + cw1200_clear_recent_scan_work); + INIT_DELAYED_WORK(&priv->join_timeout, cw1200_join_timeout); + INIT_WORK(&priv->unjoin_work, cw1200_unjoin_work); + INIT_WORK(&priv->join_complete_work, cw1200_join_complete_work); + INIT_WORK(&priv->wep_key_work, cw1200_wep_key_work); + INIT_WORK(&priv->tx_policy_upload_work, tx_policy_upload_work); + spin_lock_init(&priv->event_queue_lock); + INIT_LIST_HEAD(&priv->event_queue); + INIT_WORK(&priv->event_handler, cw1200_event_handler); + INIT_DELAYED_WORK(&priv->bss_loss_work, cw1200_bss_loss_work); + INIT_WORK(&priv->bss_params_work, cw1200_bss_params_work); + spin_lock_init(&priv->bss_loss_lock); + spin_lock_init(&priv->ps_state_lock); + INIT_WORK(&priv->set_cts_work, cw1200_set_cts_work); + INIT_WORK(&priv->set_tim_work, cw1200_set_tim_work); + INIT_WORK(&priv->multicast_start_work, cw1200_multicast_start_work); + INIT_WORK(&priv->multicast_stop_work, cw1200_multicast_stop_work); + INIT_WORK(&priv->link_id_work, cw1200_link_id_work); + INIT_DELAYED_WORK(&priv->link_id_gc_work, cw1200_link_id_gc_work); + INIT_WORK(&priv->linkid_reset_work, cw1200_link_id_reset); + INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work); + INIT_WORK(&priv->set_beacon_wakeup_period_work, + cw1200_set_beacon_wakeup_period_work); + init_timer(&priv->mcast_timeout); + priv->mcast_timeout.data = (unsigned long)priv; + priv->mcast_timeout.function = cw1200_mcast_timeout; + + if (cw1200_queue_stats_init(&priv->tx_queue_stats, + CW1200_LINK_ID_MAX, + cw1200_skb_dtor, + priv)) { + ieee80211_free_hw(hw); + return NULL; + } + + for (i = 0; i < 4; ++i) { + if (cw1200_queue_init(&priv->tx_queue[i], + &priv->tx_queue_stats, i, 16, + cw1200_ttl[i])) { + for (; i > 0; i--) + cw1200_queue_deinit(&priv->tx_queue[i - 1]); + cw1200_queue_stats_deinit(&priv->tx_queue_stats); + ieee80211_free_hw(hw); + return NULL; + } + } + + init_waitqueue_head(&priv->channel_switch_done); + init_waitqueue_head(&priv->wsm_cmd_wq); + 
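/* completed by the BH layer once the firmware reports ready;
+	 * cw1200_core_probe() below waits on it */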
+ init_waitqueue_head(&priv->wsm_startup_done);
+ init_waitqueue_head(&priv->ps_mode_switch_done);
+ wsm_buf_init(&priv->wsm_cmd_buf);
+ spin_lock_init(&priv->wsm_cmd.lock);
+ priv->wsm_cmd.done = 1;
+ tx_policy_init(priv);
+
+ return hw;
+}
+
+static int cw1200_register_common(struct ieee80211_hw *dev)
+{
+ struct cw1200_common *priv = dev->priv;
+ int err;
+
+#ifdef CONFIG_PM
+ err = cw1200_pm_init(&priv->pm_state, priv);
+ if (err) {
+ pr_err("Cannot init PM (%d).\n", err);
+ return err;
+ }
+#endif
+
+ err = ieee80211_register_hw(dev);
+ if (err) {
+ pr_err("Cannot register device (%d).\n", err);
+#ifdef CONFIG_PM
+ cw1200_pm_deinit(&priv->pm_state);
+#endif
+ return err;
+ }
+
+ cw1200_debug_init(priv);
+
+ pr_info("Registered as '%s'\n", wiphy_name(dev->wiphy));
+ return 0;
+}
+
+static void cw1200_free_common(struct ieee80211_hw *dev)
+{
+ ieee80211_free_hw(dev);
+}
+
+static void cw1200_unregister_common(struct ieee80211_hw *dev)
+{
+ struct cw1200_common *priv = dev->priv;
+ int i;
+
+ ieee80211_unregister_hw(dev);
+
+ del_timer_sync(&priv->mcast_timeout);
+ cw1200_unregister_bh(priv);
+
+ cw1200_debug_release(priv);
+
+ mutex_destroy(&priv->conf_mutex);
+
+ wsm_buf_deinit(&priv->wsm_cmd_buf);
+
+ destroy_workqueue(priv->workqueue);
+ priv->workqueue = NULL;
+
+ if (priv->sdd) {
+ release_firmware(priv->sdd);
+ priv->sdd = NULL;
+ }
+
+ for (i = 0; i < 4; ++i)
+ cw1200_queue_deinit(&priv->tx_queue[i]);
+
+ cw1200_queue_stats_deinit(&priv->tx_queue_stats);
+#ifdef CONFIG_PM
+ cw1200_pm_deinit(&priv->pm_state);
+#endif
+}
+
+/* Clock is in KHz */
+u32 cw1200_dpll_from_clk(u16 clk_khz)
+{
+ switch (clk_khz) {
+ case 0x32C8: /* 13000 KHz */
+ return 0x1D89D241;
+ case 0x3E80: /* 16000 KHz */
+ return 0x000001E1;
+ case 0x41A0: /* 16800 KHz */
+ return 0x124931C1;
+ case 0x4B00: /* 19200 KHz */
+ return 0x00000191;
+ case 0x5DC0: /* 24000 KHz */
+ return 0x00000141;
+ case 0x6590: /* 26000 KHz */
+ return 0x0EC4F121;
+ case 0x8340: /* 33600 KHz */
+ return 0x092490E1;
+ case 0x9600: /* 38400 KHz */
+ return 0x100010C1;
+ case 0x9C40: /* 40000 KHz */
+ return 0x000000C1;
+ case 0xBB80: /* 48000 KHz */
+ return 0x000000A1;
+ case 0xCB20: /* 52000 KHz */
+ return 0x07627091;
+ default:
+ pr_err("Unknown Refclk freq (0x%04x), using 26000 KHz\n",
+ clk_khz);
+ return 0x0EC4F121;
+ }
+}
+
+int cw1200_core_probe(const struct hwbus_ops *hwbus_ops,
+ struct hwbus_priv *hwbus,
+ struct device *pdev,
+ struct cw1200_common **core,
+ int ref_clk, const u8 *macaddr,
+ const char *sdd_path, bool have_5ghz)
+{
+ int err = -EINVAL;
+ struct ieee80211_hw *dev;
+ struct cw1200_common *priv;
+ struct wsm_operational_mode mode = {
+ .power_mode = cw1200_power_mode,
+ .disable_more_flag_usage = true,
+ };
+
+ dev = cw1200_init_common(macaddr, have_5ghz);
+ if (!dev)
+ goto err;
+
+ priv = dev->priv;
+ priv->hw_refclk = ref_clk;
+ if (cw1200_refclk)
+ priv->hw_refclk = cw1200_refclk;
+
+ priv->sdd_path = (char *)sdd_path;
+ if (cw1200_sdd_path)
+ priv->sdd_path = cw1200_sdd_path;
+
+ priv->hwbus_ops = hwbus_ops;
+ priv->hwbus_priv = hwbus;
+ priv->pdev = pdev;
+ SET_IEEE80211_DEV(priv->hw, pdev);
+
+ /* Pass struct cw1200_common back up */
+ *core = priv;
+
+ err = cw1200_register_bh(priv);
+ if (err)
+ goto err1;
+
+ err = cw1200_load_firmware(priv);
+ if (err)
+ goto err2;
+
+ if (wait_event_interruptible_timeout(priv->wsm_startup_done,
+ priv->firmware_ready,
+ 3*HZ) <= 0) {
+ /* TODO: Need to find out how to reset the device
+ * properly while it is in QUEUE mode.
+ */ + pr_err("Timeout waiting on device startup\n"); + err = -ETIMEDOUT; + goto err2; + } + + /* Set low-power mode. */ + wsm_set_operational_mode(priv, &mode); + + /* Enable multi-TX confirmation */ + wsm_use_multi_tx_conf(priv, true); + + err = cw1200_register_common(dev); + if (err) + goto err2; + + return err; + +err2: + cw1200_unregister_bh(priv); +err1: + cw1200_free_common(dev); +err: + *core = NULL; + return err; +} +EXPORT_SYMBOL_GPL(cw1200_core_probe); + +void cw1200_core_release(struct cw1200_common *self) +{ + /* Disable device interrupts */ + self->hwbus_ops->lock(self->hwbus_priv); + __cw1200_irq_enable(self, 0); + self->hwbus_ops->unlock(self->hwbus_priv); + + /* And then clean up */ + cw1200_unregister_common(self->hw); + cw1200_free_common(self->hw); + return; +} +EXPORT_SYMBOL_GPL(cw1200_core_release); diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c new file mode 100644 index 000000000000..b37abb9f0453 --- /dev/null +++ b/drivers/net/wireless/cw1200/pm.c @@ -0,0 +1,367 @@ +/* + * Mac80211 power management API for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/if_ether.h> +#include "cw1200.h" +#include "pm.h" +#include "sta.h" +#include "bh.h" +#include "hwbus.h" + +#define CW1200_BEACON_SKIPPING_MULTIPLIER 3 + +struct cw1200_udp_port_filter { + struct wsm_udp_port_filter_hdr hdr; + /* Up to 4 filters are allowed. */ + struct wsm_udp_port_filter filters[WSM_MAX_FILTER_ELEMENTS]; +} __packed; + +struct cw1200_ether_type_filter { + struct wsm_ether_type_filter_hdr hdr; + /* Up to 4 filters are allowed. */ + struct wsm_ether_type_filter filters[WSM_MAX_FILTER_ELEMENTS]; +} __packed; + +static struct cw1200_udp_port_filter cw1200_udp_port_filter_on = { + .hdr.num = 2, + .filters = { + [0] = { + .action = WSM_FILTER_ACTION_FILTER_OUT, + .type = WSM_FILTER_PORT_TYPE_DST, + .port = __cpu_to_le16(67), /* DHCP Bootps */ + }, + [1] = { + .action = WSM_FILTER_ACTION_FILTER_OUT, + .type = WSM_FILTER_PORT_TYPE_DST, + .port = __cpu_to_le16(68), /* DHCP Bootpc */ + }, + } +}; + +static struct wsm_udp_port_filter_hdr cw1200_udp_port_filter_off = { + .num = 0, +}; + +#ifndef ETH_P_WAPI +#define ETH_P_WAPI 0x88B4 +#endif + +static struct cw1200_ether_type_filter cw1200_ether_type_filter_on = { + .hdr.num = 4, + .filters = { + [0] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_IP), + }, + [1] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_PAE), + }, + [2] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_WAPI), + }, + [3] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_ARP), + }, + }, +}; + +static struct wsm_ether_type_filter_hdr cw1200_ether_type_filter_off = { + .num = 0, +}; + +/* private */ +struct cw1200_suspend_state { + unsigned long bss_loss_tmo; + unsigned long join_tmo; + unsigned long direct_probe; + unsigned long link_id_gc; + bool beacon_skipping; + u8 prev_ps_mode; +}; + +static void cw1200_pm_stay_awake_tmo(unsigned long arg) +{ + /* XXX what's the point of this ? 
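+ * Apparently it only gives the stay_awake timer a valid handler;
+ * nothing needs to happen on expiry beyond the timer going idle.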
+ */
+}
+
+int cw1200_pm_init(struct cw1200_pm_state *pm,
+ struct cw1200_common *priv)
+{
+ spin_lock_init(&pm->lock);
+
+ init_timer(&pm->stay_awake);
+ pm->stay_awake.data = (unsigned long)pm;
+ pm->stay_awake.function = cw1200_pm_stay_awake_tmo;
+
+ return 0;
+}
+
+void cw1200_pm_deinit(struct cw1200_pm_state *pm)
+{
+ del_timer_sync(&pm->stay_awake);
+}
+
+void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+ unsigned long tmo)
+{
+ long cur_tmo;
+
+ spin_lock_bh(&pm->lock);
+ cur_tmo = pm->stay_awake.expires - jiffies;
+ if (!timer_pending(&pm->stay_awake) || cur_tmo < (long)tmo)
+ mod_timer(&pm->stay_awake, jiffies + tmo);
+ spin_unlock_bh(&pm->lock);
+}
+
+static long cw1200_suspend_work(struct delayed_work *work)
+{
+ int ret = cancel_delayed_work(work);
+ long tmo;
+
+ if (ret > 0) {
+ /* Timer is pending */
+ tmo = work->timer.expires - jiffies;
+ if (tmo < 0)
+ tmo = 0;
+ } else {
+ tmo = -1;
+ }
+ return tmo;
+}
+
+static int cw1200_resume_work(struct cw1200_common *priv,
+ struct delayed_work *work,
+ unsigned long tmo)
+{
+ if ((long)tmo < 0)
+ return 1;
+
+ return queue_delayed_work(priv->workqueue, work, tmo);
+}
+
+int cw1200_can_suspend(struct cw1200_common *priv)
+{
+ if (atomic_read(&priv->bh_rx)) {
+ wiphy_dbg(priv->hw->wiphy, "Suspend interrupted.\n");
+ return 0;
+ }
+ return 1;
+}
+EXPORT_SYMBOL_GPL(cw1200_can_suspend);
+
+int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct cw1200_pm_state *pm_state = &priv->pm_state;
+ struct cw1200_suspend_state *state;
+ int ret;
+
+ spin_lock_bh(&pm_state->lock);
+ ret = timer_pending(&pm_state->stay_awake);
+ spin_unlock_bh(&pm_state->lock);
+ if (ret)
+ return -EAGAIN;
+
+ /* Do not suspend when the datapath is not idle */
+ if (priv->tx_queue_stats.num_queued)
+ return -EBUSY;
+
+ /* Make sure there are no configuration requests in progress. */
+ if (!mutex_trylock(&priv->conf_mutex))
+ return -EBUSY;
+
+ /* Ensure pending operations are done.
+ * Note also that wow_suspend must return in ~2.5sec, before
+ * the watchdog is triggered.
+ */
+ if (priv->channel_switch_in_progress)
+ goto revert1;
+
+ /* Do not suspend when a join is pending */
+ if (priv->join_pending)
+ goto revert1;
+
+ /* Do not suspend when scanning */
+ if (down_trylock(&priv->scan.lock))
+ goto revert1;
+
+ /* Lock TX. */
+ wsm_lock_tx_async(priv);
+
+ /* Wait to avoid a possible race with the bh code,
+ * but do not wait too long...
+ */
+ if (wait_event_timeout(priv->bh_evt_wq,
+ !priv->hw_bufs_used, HZ / 10) <= 0)
+ goto revert2;
+
+ /* Set UDP filter */
+ wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_on.hdr);
+
+ /* Set ethernet frame type filter */
+ wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_on.hdr);
+
+ /* Allocate state */
+ state = kzalloc(sizeof(struct cw1200_suspend_state), GFP_KERNEL);
+ if (!state)
+ goto revert3;
+
+ /* Change to legacy PS while going to suspend */
+ if (!priv->vif->p2p &&
+ priv->join_status == CW1200_JOIN_STATUS_STA &&
+ priv->powersave_mode.mode != WSM_PSM_PS) {
+ state->prev_ps_mode = priv->powersave_mode.mode;
+ priv->powersave_mode.mode = WSM_PSM_PS;
+ cw1200_set_pm(priv, &priv->powersave_mode);
+ if (wait_event_interruptible_timeout(priv->ps_mode_switch_done,
+ !priv->ps_mode_switch_in_progress, 1*HZ) <= 0) {
+ kfree(state); /* revert3 does not free it */
+ goto revert3;
+ }
+ }
+
+ /* Store delayed work states. */
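+ /* cw1200_suspend_work() cancels a delayed work and returns its
+ * remaining timeout (or -1 if it was not pending), so that
+ * cw1200_resume_work() can re-arm it with the remaining delay
+ * on resume:
+ */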
+ state->bss_loss_tmo = cw1200_suspend_work(&priv->bss_loss_work);
+ state->join_tmo = cw1200_suspend_work(&priv->join_timeout);
+ state->direct_probe = cw1200_suspend_work(&priv->scan.probe_work);
+ state->link_id_gc = cw1200_suspend_work(&priv->link_id_gc_work);
+
+ cancel_delayed_work_sync(&priv->clear_recent_scan_work);
+ atomic_set(&priv->recent_scan, 0);
+
+ /* Enable beacon skipping */
+ if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+ priv->join_dtim_period &&
+ !priv->has_multicast_subscription) {
+ state->beacon_skipping = true;
+ wsm_set_beacon_wakeup_period(priv,
+ priv->join_dtim_period,
+ CW1200_BEACON_SKIPPING_MULTIPLIER * priv->join_dtim_period);
+ }
+
+ /* Stop serving thread */
+ if (cw1200_bh_suspend(priv))
+ goto revert4;
+
+ ret = timer_pending(&priv->mcast_timeout);
+ if (ret)
+ goto revert5;
+
+ /* Store suspend state */
+ pm_state->suspend_state = state;
+
+ /* Enable IRQ wake */
+ ret = priv->hwbus_ops->power_mgmt(priv->hwbus_priv, true);
+ if (ret) {
+ wiphy_err(priv->hw->wiphy,
+ "PM request failed: %d. WoW is disabled.\n", ret);
+ cw1200_wow_resume(hw);
+ return -EBUSY;
+ }
+
+ /* Force resume if an event is coming from the device. */
+ if (atomic_read(&priv->bh_rx)) {
+ cw1200_wow_resume(hw);
+ return -EAGAIN;
+ }
+
+ return 0;
+
+revert5:
+ WARN_ON(cw1200_bh_resume(priv));
+revert4:
+ cw1200_resume_work(priv, &priv->bss_loss_work,
+ state->bss_loss_tmo);
+ cw1200_resume_work(priv, &priv->join_timeout,
+ state->join_tmo);
+ cw1200_resume_work(priv, &priv->scan.probe_work,
+ state->direct_probe);
+ cw1200_resume_work(priv, &priv->link_id_gc_work,
+ state->link_id_gc);
+ kfree(state);
+revert3:
+ wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
+ wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off);
+revert2:
+ wsm_unlock_tx(priv);
+ up(&priv->scan.lock);
+revert1:
+ mutex_unlock(&priv->conf_mutex);
+ return -EBUSY;
+}
+
+int cw1200_wow_resume(struct ieee80211_hw *hw)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct cw1200_pm_state *pm_state = &priv->pm_state;
+ struct cw1200_suspend_state *state;
+
+ state = pm_state->suspend_state;
+ pm_state->suspend_state = NULL;
+
+ /* Disable IRQ wake */
+ priv->hwbus_ops->power_mgmt(priv->hwbus_priv, false);
+
+ /* Scan.lock must be released before BH is resumed; otherwise,
+ * if a BSS_LOST command arrives, its processing
+ * will be delayed.
+ */
+ up(&priv->scan.lock);
+
+ /* Resume BH thread */
+ WARN_ON(cw1200_bh_resume(priv));
+
+ /* Restore the previous PS mode */
+ if (!priv->vif->p2p && priv->join_status == CW1200_JOIN_STATUS_STA) {
+ priv->powersave_mode.mode = state->prev_ps_mode;
+ cw1200_set_pm(priv, &priv->powersave_mode);
+ }
+
+ if (state->beacon_skipping) {
+ wsm_set_beacon_wakeup_period(priv, priv->beacon_int *
+ priv->join_dtim_period >
+ MAX_BEACON_SKIP_TIME_MS ?
1 : + priv->join_dtim_period, 0); + state->beacon_skipping = false; + } + + /* Resume delayed work */ + cw1200_resume_work(priv, &priv->bss_loss_work, + state->bss_loss_tmo); + cw1200_resume_work(priv, &priv->join_timeout, + state->join_tmo); + cw1200_resume_work(priv, &priv->scan.probe_work, + state->direct_probe); + cw1200_resume_work(priv, &priv->link_id_gc_work, + state->link_id_gc); + + /* Remove UDP port filter */ + wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off); + + /* Remove ethernet frame type filter */ + wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off); + + /* Unlock datapath */ + wsm_unlock_tx(priv); + + /* Unlock configuration mutex */ + mutex_unlock(&priv->conf_mutex); + + /* Free memory */ + kfree(state); + + return 0; +} diff --git a/drivers/net/wireless/cw1200/pm.h b/drivers/net/wireless/cw1200/pm.h new file mode 100644 index 000000000000..3ed90ff22bb8 --- /dev/null +++ b/drivers/net/wireless/cw1200/pm.h @@ -0,0 +1,43 @@ +/* + * Mac80211 power management interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef PM_H_INCLUDED +#define PM_H_INCLUDED + +/* ******************************************************************** */ +/* mac80211 API */ + +/* extern */ struct cw1200_common; +/* private */ struct cw1200_suspend_state; + +struct cw1200_pm_state { + struct cw1200_suspend_state *suspend_state; + struct timer_list stay_awake; + struct platform_device *pm_dev; + spinlock_t lock; /* Protect access */ +}; + +#ifdef CONFIG_PM +int cw1200_pm_init(struct cw1200_pm_state *pm, + struct cw1200_common *priv); +void cw1200_pm_deinit(struct cw1200_pm_state *pm); +int cw1200_wow_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan); +int cw1200_wow_resume(struct ieee80211_hw *hw); +int cw1200_can_suspend(struct cw1200_common *priv); +void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, + unsigned long tmo); +#else +static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, + unsigned long tmo) { +} +#endif +#endif diff --git a/drivers/net/wireless/cw1200/queue.c b/drivers/net/wireless/cw1200/queue.c new file mode 100644 index 000000000000..8510454d5db1 --- /dev/null +++ b/drivers/net/wireless/cw1200/queue.c @@ -0,0 +1,583 @@ +/* + * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <net/mac80211.h> +#include <linux/sched.h> +#include "queue.h" +#include "cw1200.h" +#include "debug.h" + +/* private */ struct cw1200_queue_item +{ + struct list_head head; + struct sk_buff *skb; + u32 packet_id; + unsigned long queue_timestamp; + unsigned long xmit_timestamp; + struct cw1200_txpriv txpriv; + u8 generation; +}; + +static inline void __cw1200_queue_lock(struct cw1200_queue *queue) +{ + struct cw1200_queue_stats *stats = queue->stats; + if (queue->tx_locked_cnt++ == 0) { + pr_debug("[TX] Queue %d is locked.\n", + queue->queue_id); + ieee80211_stop_queue(stats->priv->hw, queue->queue_id); + } +} + +static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) +{ + struct cw1200_queue_stats *stats = queue->stats; + BUG_ON(!queue->tx_locked_cnt); + if (--queue->tx_locked_cnt == 0) { + pr_debug("[TX] Queue %d is unlocked.\n", + queue->queue_id); + ieee80211_wake_queue(stats->priv->hw, queue->queue_id); + } +} + +static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation, + u8 *queue_id, u8 *item_generation, + u8 *item_id) +{ + *item_id = (packet_id >> 0) & 0xFF; + *item_generation = (packet_id >> 8) & 0xFF; + *queue_id = (packet_id >> 16) & 0xFF; + *queue_generation = (packet_id >> 24) & 0xFF; +} + +static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id, + u8 item_generation, u8 item_id) +{ + return ((u32)item_id << 0) | + ((u32)item_generation << 8) | + ((u32)queue_id << 16) | + ((u32)queue_generation << 24); +} + +static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats, + struct list_head *gc_list) +{ + struct cw1200_queue_item *item, *tmp; + + list_for_each_entry_safe(item, tmp, gc_list, head) { + list_del(&item->head); + stats->skb_dtor(stats->priv, item->skb, &item->txpriv); + kfree(item); + } +} + +static void cw1200_queue_register_post_gc(struct list_head *gc_list, + struct cw1200_queue_item *item) +{ + struct cw1200_queue_item *gc_item; + gc_item = kmalloc(sizeof(struct cw1200_queue_item), + GFP_ATOMIC); + BUG_ON(!gc_item); + memcpy(gc_item, item, sizeof(struct cw1200_queue_item)); + list_add_tail(&gc_item->head, gc_list); +} + +static void __cw1200_queue_gc(struct cw1200_queue *queue, + struct list_head *head, + bool unlock) +{ + struct cw1200_queue_stats *stats = queue->stats; + struct cw1200_queue_item *item = NULL, *tmp; + bool wakeup_stats = false; + + list_for_each_entry_safe(item, tmp, &queue->queue, head) { + if (jiffies - item->queue_timestamp < queue->ttl) + break; + --queue->num_queued; + --queue->link_map_cache[item->txpriv.link_id]; + spin_lock_bh(&stats->lock); + --stats->num_queued; + if (!--stats->link_map_cache[item->txpriv.link_id]) + wakeup_stats = true; + spin_unlock_bh(&stats->lock); + cw1200_debug_tx_ttl(stats->priv); + cw1200_queue_register_post_gc(head, item); + item->skb = NULL; + list_move_tail(&item->head, &queue->free_pool); + } + + if (wakeup_stats) + wake_up(&stats->wait_link_id_empty); + + if (queue->overfull) { + if (queue->num_queued <= (queue->capacity >> 1)) { + queue->overfull = false; + if (unlock) + __cw1200_queue_unlock(queue); + } else if (item) { + unsigned long tmo = item->queue_timestamp + queue->ttl; + mod_timer(&queue->gc, tmo); + cw1200_pm_stay_awake(&stats->priv->pm_state, + tmo - jiffies); + } + } +} + +static void cw1200_queue_gc(unsigned long arg) +{ + LIST_HEAD(list); + struct cw1200_queue *queue = + (struct cw1200_queue *)arg; + + spin_lock_bh(&queue->lock); + __cw1200_queue_gc(queue, &list, true); + spin_unlock_bh(&queue->lock); + 
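/* Run the skb destructors for expired items outside of queue->lock: */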
cw1200_queue_post_gc(queue->stats, &list); +} + +int cw1200_queue_stats_init(struct cw1200_queue_stats *stats, + size_t map_capacity, + cw1200_queue_skb_dtor_t skb_dtor, + struct cw1200_common *priv) +{ + memset(stats, 0, sizeof(*stats)); + stats->map_capacity = map_capacity; + stats->skb_dtor = skb_dtor; + stats->priv = priv; + spin_lock_init(&stats->lock); + init_waitqueue_head(&stats->wait_link_id_empty); + + stats->link_map_cache = kzalloc(sizeof(int) * map_capacity, + GFP_KERNEL); + if (!stats->link_map_cache) + return -ENOMEM; + + return 0; +} + +int cw1200_queue_init(struct cw1200_queue *queue, + struct cw1200_queue_stats *stats, + u8 queue_id, + size_t capacity, + unsigned long ttl) +{ + size_t i; + + memset(queue, 0, sizeof(*queue)); + queue->stats = stats; + queue->capacity = capacity; + queue->queue_id = queue_id; + queue->ttl = ttl; + INIT_LIST_HEAD(&queue->queue); + INIT_LIST_HEAD(&queue->pending); + INIT_LIST_HEAD(&queue->free_pool); + spin_lock_init(&queue->lock); + init_timer(&queue->gc); + queue->gc.data = (unsigned long)queue; + queue->gc.function = cw1200_queue_gc; + + queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity, + GFP_KERNEL); + if (!queue->pool) + return -ENOMEM; + + queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity, + GFP_KERNEL); + if (!queue->link_map_cache) { + kfree(queue->pool); + queue->pool = NULL; + return -ENOMEM; + } + + for (i = 0; i < capacity; ++i) + list_add_tail(&queue->pool[i].head, &queue->free_pool); + + return 0; +} + +int cw1200_queue_clear(struct cw1200_queue *queue) +{ + int i; + LIST_HEAD(gc_list); + struct cw1200_queue_stats *stats = queue->stats; + struct cw1200_queue_item *item, *tmp; + + spin_lock_bh(&queue->lock); + queue->generation++; + list_splice_tail_init(&queue->queue, &queue->pending); + list_for_each_entry_safe(item, tmp, &queue->pending, head) { + WARN_ON(!item->skb); + cw1200_queue_register_post_gc(&gc_list, item); + item->skb = NULL; + list_move_tail(&item->head, &queue->free_pool); + } + queue->num_queued = 0; + queue->num_pending = 0; + + spin_lock_bh(&stats->lock); + for (i = 0; i < stats->map_capacity; ++i) { + stats->num_queued -= queue->link_map_cache[i]; + stats->link_map_cache[i] -= queue->link_map_cache[i]; + queue->link_map_cache[i] = 0; + } + spin_unlock_bh(&stats->lock); + if (queue->overfull) { + queue->overfull = false; + __cw1200_queue_unlock(queue); + } + spin_unlock_bh(&queue->lock); + wake_up(&stats->wait_link_id_empty); + cw1200_queue_post_gc(stats, &gc_list); + return 0; +} + +void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats) +{ + kfree(stats->link_map_cache); + stats->link_map_cache = NULL; +} + +void cw1200_queue_deinit(struct cw1200_queue *queue) +{ + cw1200_queue_clear(queue); + del_timer_sync(&queue->gc); + INIT_LIST_HEAD(&queue->free_pool); + kfree(queue->pool); + kfree(queue->link_map_cache); + queue->pool = NULL; + queue->link_map_cache = NULL; + queue->capacity = 0; +} + +size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, + u32 link_id_map) +{ + size_t ret; + int i, bit; + size_t map_capacity = queue->stats->map_capacity; + + if (!link_id_map) + return 0; + + spin_lock_bh(&queue->lock); + if (link_id_map == (u32)-1) { + ret = queue->num_queued - queue->num_pending; + } else { + ret = 0; + for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) { + if (link_id_map & bit) + ret += queue->link_map_cache[i]; + } + } + spin_unlock_bh(&queue->lock); + return ret; +} + +int cw1200_queue_put(struct cw1200_queue *queue, + struct sk_buff *skb, + 
struct cw1200_txpriv *txpriv) +{ + int ret = 0; + LIST_HEAD(gc_list); + struct cw1200_queue_stats *stats = queue->stats; + + if (txpriv->link_id >= queue->stats->map_capacity) + return -EINVAL; + + spin_lock_bh(&queue->lock); + if (!WARN_ON(list_empty(&queue->free_pool))) { + struct cw1200_queue_item *item = list_first_entry( + &queue->free_pool, struct cw1200_queue_item, head); + BUG_ON(item->skb); + + list_move_tail(&item->head, &queue->queue); + item->skb = skb; + item->txpriv = *txpriv; + item->generation = 0; + item->packet_id = cw1200_queue_mk_packet_id(queue->generation, + queue->queue_id, + item->generation, + item - queue->pool); + item->queue_timestamp = jiffies; + + ++queue->num_queued; + ++queue->link_map_cache[txpriv->link_id]; + + spin_lock_bh(&stats->lock); + ++stats->num_queued; + ++stats->link_map_cache[txpriv->link_id]; + spin_unlock_bh(&stats->lock); + + /* TX may happen in parallel sometimes. + * Leave extra queue slots so we don't overflow. + */ + if (queue->overfull == false && + queue->num_queued >= + (queue->capacity - (num_present_cpus() - 1))) { + queue->overfull = true; + __cw1200_queue_lock(queue); + mod_timer(&queue->gc, jiffies); + } + } else { + ret = -ENOENT; + } + spin_unlock_bh(&queue->lock); + return ret; +} + +int cw1200_queue_get(struct cw1200_queue *queue, + u32 link_id_map, + struct wsm_tx **tx, + struct ieee80211_tx_info **tx_info, + const struct cw1200_txpriv **txpriv) +{ + int ret = -ENOENT; + struct cw1200_queue_item *item; + struct cw1200_queue_stats *stats = queue->stats; + bool wakeup_stats = false; + + spin_lock_bh(&queue->lock); + list_for_each_entry(item, &queue->queue, head) { + if (link_id_map & BIT(item->txpriv.link_id)) { + ret = 0; + break; + } + } + + if (!WARN_ON(ret)) { + *tx = (struct wsm_tx *)item->skb->data; + *tx_info = IEEE80211_SKB_CB(item->skb); + *txpriv = &item->txpriv; + (*tx)->packet_id = __cpu_to_le32(item->packet_id); + list_move_tail(&item->head, &queue->pending); + ++queue->num_pending; + --queue->link_map_cache[item->txpriv.link_id]; + item->xmit_timestamp = jiffies; + + spin_lock_bh(&stats->lock); + --stats->num_queued; + if (!--stats->link_map_cache[item->txpriv.link_id]) + wakeup_stats = true; + spin_unlock_bh(&stats->lock); + } + spin_unlock_bh(&queue->lock); + if (wakeup_stats) + wake_up(&stats->wait_link_id_empty); + return ret; +} + +int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id) +{ + int ret = 0; + u8 queue_generation, queue_id, item_generation, item_id; + struct cw1200_queue_item *item; + struct cw1200_queue_stats *stats = queue->stats; + + cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, + &item_generation, &item_id); + + item = &queue->pool[item_id]; + + spin_lock_bh(&queue->lock); + BUG_ON(queue_id != queue->queue_id); + if (queue_generation != queue->generation) { + ret = -ENOENT; + } else if (item_id >= (unsigned) queue->capacity) { + WARN_ON(1); + ret = -EINVAL; + } else if (item->generation != item_generation) { + WARN_ON(1); + ret = -ENOENT; + } else { + --queue->num_pending; + ++queue->link_map_cache[item->txpriv.link_id]; + + spin_lock_bh(&stats->lock); + ++stats->num_queued; + ++stats->link_map_cache[item->txpriv.link_id]; + spin_unlock_bh(&stats->lock); + + item->generation = ++item_generation; + item->packet_id = cw1200_queue_mk_packet_id(queue_generation, + queue_id, + item_generation, + item_id); + list_move(&item->head, &queue->queue); + } + spin_unlock_bh(&queue->lock); + return ret; +} + +int cw1200_queue_requeue_all(struct cw1200_queue *queue) +{ + struct 
cw1200_queue_item *item, *tmp; + struct cw1200_queue_stats *stats = queue->stats; + spin_lock_bh(&queue->lock); + + list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) { + --queue->num_pending; + ++queue->link_map_cache[item->txpriv.link_id]; + + spin_lock_bh(&stats->lock); + ++stats->num_queued; + ++stats->link_map_cache[item->txpriv.link_id]; + spin_unlock_bh(&stats->lock); + + ++item->generation; + item->packet_id = cw1200_queue_mk_packet_id(queue->generation, + queue->queue_id, + item->generation, + item - queue->pool); + list_move(&item->head, &queue->queue); + } + spin_unlock_bh(&queue->lock); + + return 0; +} + +int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id) +{ + int ret = 0; + u8 queue_generation, queue_id, item_generation, item_id; + struct cw1200_queue_item *item; + struct cw1200_queue_stats *stats = queue->stats; + struct sk_buff *gc_skb = NULL; + struct cw1200_txpriv gc_txpriv; + + cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, + &item_generation, &item_id); + + item = &queue->pool[item_id]; + + spin_lock_bh(&queue->lock); + BUG_ON(queue_id != queue->queue_id); + if (queue_generation != queue->generation) { + ret = -ENOENT; + } else if (item_id >= (unsigned) queue->capacity) { + WARN_ON(1); + ret = -EINVAL; + } else if (item->generation != item_generation) { + WARN_ON(1); + ret = -ENOENT; + } else { + gc_txpriv = item->txpriv; + gc_skb = item->skb; + item->skb = NULL; + --queue->num_pending; + --queue->num_queued; + ++queue->num_sent; + ++item->generation; + /* Do not use list_move_tail here, but list_move: + * try to utilize cache row. + */ + list_move(&item->head, &queue->free_pool); + + if (queue->overfull && + (queue->num_queued <= (queue->capacity >> 1))) { + queue->overfull = false; + __cw1200_queue_unlock(queue); + } + } + spin_unlock_bh(&queue->lock); + + if (gc_skb) + stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv); + + return ret; +} + +int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, + struct sk_buff **skb, + const struct cw1200_txpriv **txpriv) +{ + int ret = 0; + u8 queue_generation, queue_id, item_generation, item_id; + struct cw1200_queue_item *item; + cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, + &item_generation, &item_id); + + item = &queue->pool[item_id]; + + spin_lock_bh(&queue->lock); + BUG_ON(queue_id != queue->queue_id); + if (queue_generation != queue->generation) { + ret = -ENOENT; + } else if (item_id >= (unsigned) queue->capacity) { + WARN_ON(1); + ret = -EINVAL; + } else if (item->generation != item_generation) { + WARN_ON(1); + ret = -ENOENT; + } else { + *skb = item->skb; + *txpriv = &item->txpriv; + } + spin_unlock_bh(&queue->lock); + return ret; +} + +void cw1200_queue_lock(struct cw1200_queue *queue) +{ + spin_lock_bh(&queue->lock); + __cw1200_queue_lock(queue); + spin_unlock_bh(&queue->lock); +} + +void cw1200_queue_unlock(struct cw1200_queue *queue) +{ + spin_lock_bh(&queue->lock); + __cw1200_queue_unlock(queue); + spin_unlock_bh(&queue->lock); +} + +bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue, + unsigned long *timestamp, + u32 pending_frame_id) +{ + struct cw1200_queue_item *item; + bool ret; + + spin_lock_bh(&queue->lock); + ret = !list_empty(&queue->pending); + if (ret) { + list_for_each_entry(item, &queue->pending, head) { + if (item->packet_id != pending_frame_id) + if (time_before(item->xmit_timestamp, + *timestamp)) + *timestamp = item->xmit_timestamp; + } + } + spin_unlock_bh(&queue->lock); + return ret; +} + +bool 
cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats, + u32 link_id_map) +{ + bool empty = true; + + spin_lock_bh(&stats->lock); + if (link_id_map == (u32)-1) { + empty = stats->num_queued == 0; + } else { + int i; + for (i = 0; i < stats->map_capacity; ++i) { + if (link_id_map & BIT(i)) { + if (stats->link_map_cache[i]) { + empty = false; + break; + } + } + } + } + spin_unlock_bh(&stats->lock); + + return empty; +} diff --git a/drivers/net/wireless/cw1200/queue.h b/drivers/net/wireless/cw1200/queue.h new file mode 100644 index 000000000000..119f9c79c14e --- /dev/null +++ b/drivers/net/wireless/cw1200/queue.h @@ -0,0 +1,116 @@ +/* + * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_QUEUE_H_INCLUDED +#define CW1200_QUEUE_H_INCLUDED + +/* private */ struct cw1200_queue_item; + +/* extern */ struct sk_buff; +/* extern */ struct wsm_tx; +/* extern */ struct cw1200_common; +/* extern */ struct ieee80211_tx_queue_stats; +/* extern */ struct cw1200_txpriv; + +/* forward */ struct cw1200_queue_stats; + +typedef void (*cw1200_queue_skb_dtor_t)(struct cw1200_common *priv, + struct sk_buff *skb, + const struct cw1200_txpriv *txpriv); + +struct cw1200_queue { + struct cw1200_queue_stats *stats; + size_t capacity; + size_t num_queued; + size_t num_pending; + size_t num_sent; + struct cw1200_queue_item *pool; + struct list_head queue; + struct list_head free_pool; + struct list_head pending; + int tx_locked_cnt; + int *link_map_cache; + bool overfull; + spinlock_t lock; /* Protect queue entry */ + u8 queue_id; + u8 generation; + struct timer_list gc; + unsigned long ttl; +}; + +struct cw1200_queue_stats { + spinlock_t lock; /* Protect stats entry */ + int *link_map_cache; + int num_queued; + size_t map_capacity; + wait_queue_head_t wait_link_id_empty; + cw1200_queue_skb_dtor_t skb_dtor; + struct cw1200_common *priv; +}; + +struct cw1200_txpriv { + u8 link_id; + u8 raw_link_id; + u8 tid; + u8 rate_id; + u8 offset; +}; + +int cw1200_queue_stats_init(struct cw1200_queue_stats *stats, + size_t map_capacity, + cw1200_queue_skb_dtor_t skb_dtor, + struct cw1200_common *priv); +int cw1200_queue_init(struct cw1200_queue *queue, + struct cw1200_queue_stats *stats, + u8 queue_id, + size_t capacity, + unsigned long ttl); +int cw1200_queue_clear(struct cw1200_queue *queue); +void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats); +void cw1200_queue_deinit(struct cw1200_queue *queue); + +size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, + u32 link_id_map); +int cw1200_queue_put(struct cw1200_queue *queue, + struct sk_buff *skb, + struct cw1200_txpriv *txpriv); +int cw1200_queue_get(struct cw1200_queue *queue, + u32 link_id_map, + struct wsm_tx **tx, + struct ieee80211_tx_info **tx_info, + const struct cw1200_txpriv **txpriv); +int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id); +int cw1200_queue_requeue_all(struct cw1200_queue *queue); +int cw1200_queue_remove(struct cw1200_queue *queue, + u32 packet_id); +int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, + struct sk_buff **skb, + const struct cw1200_txpriv **txpriv); +void cw1200_queue_lock(struct cw1200_queue *queue); +void cw1200_queue_unlock(struct cw1200_queue 
*queue);
+bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
+ unsigned long *timestamp,
+ u32 pending_frame_id);
+
+bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
+ u32 link_id_map);
+
+static inline u8 cw1200_queue_get_queue_id(u32 packet_id)
+{
+ return (packet_id >> 16) & 0xFF;
+}
+
+static inline u8 cw1200_queue_get_generation(u32 packet_id)
+{
+ return (packet_id >> 8) & 0xFF;
+}
+
+#endif /* CW1200_QUEUE_H_INCLUDED */
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
new file mode 100644
index 000000000000..ee3c19037aac
--- /dev/null
+++ b/drivers/net/wireless/cw1200/scan.c
@@ -0,0 +1,461 @@
+/*
+ * Scan implementation for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include "cw1200.h"
+#include "scan.h"
+#include "sta.h"
+#include "pm.h"
+
+static void cw1200_scan_restart_delayed(struct cw1200_common *priv);
+
+static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan)
+{
+ int ret, i;
+ int tmo = 2000;
+
+ switch (priv->join_status) {
+ case CW1200_JOIN_STATUS_PRE_STA:
+ case CW1200_JOIN_STATUS_JOINING:
+ return -EBUSY;
+ default:
+ break;
+ }
+
+ wiphy_dbg(priv->hw->wiphy, "[SCAN] hw req, type %d, %d channels, flags: 0x%x.\n",
+ scan->type, scan->num_channels, scan->flags);
+
+ for (i = 0; i < scan->num_channels; ++i)
+ tmo += scan->ch[i].max_chan_time + 10;
+
+ cancel_delayed_work_sync(&priv->clear_recent_scan_work);
+ atomic_set(&priv->scan.in_progress, 1);
+ atomic_set(&priv->recent_scan, 1);
+ cw1200_pm_stay_awake(&priv->pm_state, tmo * HZ / 1000);
+ queue_delayed_work(priv->workqueue, &priv->scan.timeout,
+ tmo * HZ / 1000);
+ ret = wsm_scan(priv, scan);
+ if (ret) {
+ atomic_set(&priv->scan.in_progress, 0);
+ cancel_delayed_work_sync(&priv->scan.timeout);
+ cw1200_scan_restart_delayed(priv);
+ }
+ return ret;
+}
+
+int cw1200_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
+ };
+ int i, ret;
+
+ if (!priv->vif)
+ return -EINVAL;
+
+ /* Scanning while acting as P2P_GO corrupts the firmware's MiniAP mode */
+ if (priv->join_status == CW1200_JOIN_STATUS_AP)
+ return -EOPNOTSUPP;
+
+ if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
+ req->n_ssids = 0;
+
+ wiphy_dbg(hw->wiphy, "[SCAN] Scan request for %d SSIDs.\n",
+ req->n_ssids);
+
+ if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
+ return -EINVAL;
+
+ frame.skb = ieee80211_probereq_get(hw, priv->vif, NULL, 0,
+ req->ie_len);
+ if (!frame.skb)
+ return -ENOMEM;
+
+ if (req->ie_len)
+ memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len);
+
+ /* will be unlocked in cw1200_scan_work() */
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+
+ ret = wsm_set_template_frame(priv, &frame);
+ if (!ret) {
+ /* The host wants to be the probe responder.
*/ + ret = wsm_set_probe_responder(priv, true); + } + if (ret) { + mutex_unlock(&priv->conf_mutex); + up(&priv->scan.lock); + dev_kfree_skb(frame.skb); + return ret; + } + + wsm_lock_tx(priv); + + BUG_ON(priv->scan.req); + priv->scan.req = req; + priv->scan.n_ssids = 0; + priv->scan.status = 0; + priv->scan.begin = &req->channels[0]; + priv->scan.curr = priv->scan.begin; + priv->scan.end = &req->channels[req->n_channels]; + priv->scan.output_power = priv->output_power; + + for (i = 0; i < req->n_ssids; ++i) { + struct wsm_ssid *dst = &priv->scan.ssids[priv->scan.n_ssids]; + memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid)); + dst->length = req->ssids[i].ssid_len; + ++priv->scan.n_ssids; + } + + mutex_unlock(&priv->conf_mutex); + + if (frame.skb) + dev_kfree_skb(frame.skb); + queue_work(priv->workqueue, &priv->scan.work); + return 0; +} + +void cw1200_scan_work(struct work_struct *work) +{ + struct cw1200_common *priv = container_of(work, struct cw1200_common, + scan.work); + struct ieee80211_channel **it; + struct wsm_scan scan = { + .type = WSM_SCAN_TYPE_FOREGROUND, + .flags = WSM_SCAN_FLAG_SPLIT_METHOD, + }; + bool first_run = (priv->scan.begin == priv->scan.curr && + priv->scan.begin != priv->scan.end); + int i; + + if (first_run) { + /* Firmware gets crazy if scan request is sent + * when STA is joined but not yet associated. + * Force unjoin in this case. + */ + if (cancel_delayed_work_sync(&priv->join_timeout) > 0) + cw1200_join_timeout(&priv->join_timeout.work); + } + + mutex_lock(&priv->conf_mutex); + + if (first_run) { + if (priv->join_status == CW1200_JOIN_STATUS_STA && + !(priv->powersave_mode.mode & WSM_PSM_PS)) { + struct wsm_set_pm pm = priv->powersave_mode; + pm.mode = WSM_PSM_PS; + cw1200_set_pm(priv, &pm); + } else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { + /* FW bug: driver has to restart p2p-dev mode + * after scan + */ + cw1200_disable_listening(priv); + } + } + + if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) { + if (priv->scan.output_power != priv->output_power) + wsm_set_output_power(priv, priv->output_power * 10); + if (priv->join_status == CW1200_JOIN_STATUS_STA && + !(priv->powersave_mode.mode & WSM_PSM_PS)) + cw1200_set_pm(priv, &priv->powersave_mode); + + if (priv->scan.status < 0) + wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan failed (%d).\n", + priv->scan.status); + else if (priv->scan.req) + wiphy_dbg(priv->hw->wiphy, + "[SCAN] Scan completed.\n"); + else + wiphy_dbg(priv->hw->wiphy, + "[SCAN] Scan canceled.\n"); + + priv->scan.req = NULL; + cw1200_scan_restart_delayed(priv); + wsm_unlock_tx(priv); + mutex_unlock(&priv->conf_mutex); + ieee80211_scan_completed(priv->hw, priv->scan.status ? 1 : 0); + up(&priv->scan.lock); + return; + } else { + struct ieee80211_channel *first = *priv->scan.curr; + for (it = priv->scan.curr + 1, i = 1; + it != priv->scan.end && i < WSM_SCAN_MAX_NUM_OF_CHANNELS; + ++it, ++i) { + if ((*it)->band != first->band) + break; + if (((*it)->flags ^ first->flags) & + IEEE80211_CHAN_PASSIVE_SCAN) + break; + if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) && + (*it)->max_power != first->max_power) + break; + } + scan.band = first->band; + + if (priv->scan.req->no_cck) + scan.max_tx_rate = WSM_TRANSMIT_RATE_6; + else + scan.max_tx_rate = WSM_TRANSMIT_RATE_1; + scan.num_probes = + (first->flags & IEEE80211_CHAN_PASSIVE_SCAN) ? 0 : 2; + scan.num_ssids = priv->scan.n_ssids; + scan.ssids = &priv->scan.ssids[0]; + scan.num_channels = it - priv->scan.curr; + /* TODO: Is it optimal? 
*/ + scan.probe_delay = 100; + /* It is not stated in WSM specification, however + * FW team says that driver may not use FG scan + * when joined. + */ + if (priv->join_status == CW1200_JOIN_STATUS_STA) { + scan.type = WSM_SCAN_TYPE_BACKGROUND; + scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND; + } + scan.ch = kzalloc( + sizeof(struct wsm_scan_ch) * (it - priv->scan.curr), + GFP_KERNEL); + if (!scan.ch) { + priv->scan.status = -ENOMEM; + goto fail; + } + for (i = 0; i < scan.num_channels; ++i) { + scan.ch[i].number = priv->scan.curr[i]->hw_value; + if (priv->scan.curr[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) { + scan.ch[i].min_chan_time = 50; + scan.ch[i].max_chan_time = 100; + } else { + scan.ch[i].min_chan_time = 10; + scan.ch[i].max_chan_time = 25; + } + } + if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) && + priv->scan.output_power != first->max_power) { + priv->scan.output_power = first->max_power; + wsm_set_output_power(priv, + priv->scan.output_power * 10); + } + priv->scan.status = cw1200_scan_start(priv, &scan); + kfree(scan.ch); + if (priv->scan.status) + goto fail; + priv->scan.curr = it; + } + mutex_unlock(&priv->conf_mutex); + return; + +fail: + priv->scan.curr = priv->scan.end; + mutex_unlock(&priv->conf_mutex); + queue_work(priv->workqueue, &priv->scan.work); + return; +} + +static void cw1200_scan_restart_delayed(struct cw1200_common *priv) +{ + /* FW bug: driver has to restart p2p-dev mode after scan. */ + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { + cw1200_enable_listening(priv); + cw1200_update_filtering(priv); + } + + if (priv->delayed_unjoin) { + priv->delayed_unjoin = false; + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } else if (priv->delayed_link_loss) { + wiphy_dbg(priv->hw->wiphy, "[CQM] Requeue BSS loss.\n"); + priv->delayed_link_loss = 0; + cw1200_cqm_bssloss_sm(priv, 1, 0, 0); + } +} + +static void cw1200_scan_complete(struct cw1200_common *priv) +{ + queue_delayed_work(priv->workqueue, &priv->clear_recent_scan_work, HZ); + if (priv->scan.direct_probe) { + wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe complete.\n"); + cw1200_scan_restart_delayed(priv); + priv->scan.direct_probe = 0; + up(&priv->scan.lock); + wsm_unlock_tx(priv); + } else { + cw1200_scan_work(&priv->scan.work); + } +} + +void cw1200_scan_failed_cb(struct cw1200_common *priv) +{ + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) + /* STA is stopped. */ + return; + + if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) { + priv->scan.status = -EIO; + queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0); + } +} + + +void cw1200_scan_complete_cb(struct cw1200_common *priv, + struct wsm_scan_complete *arg) +{ + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) + /* STA is stopped. 
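+ * A scan-complete indication with no interface running
+ * can simply be ignored.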
*/ + return; + + if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) { + priv->scan.status = 1; + queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0); + } +} + +void cw1200_clear_recent_scan_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, + clear_recent_scan_work.work); + atomic_xchg(&priv->recent_scan, 0); +} + +void cw1200_scan_timeout(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, scan.timeout.work); + if (atomic_xchg(&priv->scan.in_progress, 0)) { + if (priv->scan.status > 0) { + priv->scan.status = 0; + } else if (!priv->scan.status) { + wiphy_warn(priv->hw->wiphy, + "Timeout waiting for scan complete notification.\n"); + priv->scan.status = -ETIMEDOUT; + priv->scan.curr = priv->scan.end; + wsm_stop_scan(priv); + } + cw1200_scan_complete(priv); + } +} + +void cw1200_probe_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, scan.probe_work.work); + u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id); + struct cw1200_queue *queue = &priv->tx_queue[queue_id]; + const struct cw1200_txpriv *txpriv; + struct wsm_tx *wsm; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST, + }; + struct wsm_ssid ssids[1] = {{ + .length = 0, + } }; + struct wsm_scan_ch ch[1] = {{ + .min_chan_time = 0, + .max_chan_time = 10, + } }; + struct wsm_scan scan = { + .type = WSM_SCAN_TYPE_FOREGROUND, + .num_probes = 1, + .probe_delay = 0, + .num_channels = 1, + .ssids = ssids, + .ch = ch, + }; + u8 *ies; + size_t ies_len; + int ret; + + wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe work.\n"); + + mutex_lock(&priv->conf_mutex); + if (down_trylock(&priv->scan.lock)) { + /* Scan is already in progress. Requeue self. */ + schedule(); + queue_delayed_work(priv->workqueue, + &priv->scan.probe_work, HZ / 10); + mutex_unlock(&priv->conf_mutex); + return; + } + + /* Make sure we still have a pending probe req */ + if (cw1200_queue_get_skb(queue, priv->pending_frame_id, + &frame.skb, &txpriv)) { + up(&priv->scan.lock); + mutex_unlock(&priv->conf_mutex); + wsm_unlock_tx(priv); + return; + } + wsm = (struct wsm_tx *)frame.skb->data; + scan.max_tx_rate = wsm->max_tx_rate; + scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? + WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; + if (priv->join_status == CW1200_JOIN_STATUS_STA || + priv->join_status == CW1200_JOIN_STATUS_IBSS) { + scan.type = WSM_SCAN_TYPE_BACKGROUND; + scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND; + } + ch[0].number = priv->channel->hw_value; + + skb_pull(frame.skb, txpriv->offset); + + ies = &frame.skb->data[sizeof(struct ieee80211_hdr_3addr)]; + ies_len = frame.skb->len - sizeof(struct ieee80211_hdr_3addr); + + if (ies_len) { + u8 *ssidie = + (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len); + if (ssidie && ssidie[1] && ssidie[1] <= sizeof(ssids[0].ssid)) { + u8 *nextie = &ssidie[2 + ssidie[1]]; + /* Remove SSID from the IE list. 
It has to be provided
+ * as a separate argument in the cw1200_scan_start call.
+ */
+
+ /* Store the SSID locally */
+ ssids[0].length = ssidie[1];
+ memcpy(ssids[0].ssid, &ssidie[2], ssids[0].length);
+ scan.num_ssids = 1;
+
+ /* Remove the SSID from the IE list */
+ ssidie[1] = 0;
+ memmove(&ssidie[2], nextie, &ies[ies_len] - nextie);
+ skb_trim(frame.skb, frame.skb->len - ssids[0].length);
+ }
+ }
+
+ /* FW bug: driver has to restart p2p-dev mode after scan */
+ if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
+ cw1200_disable_listening(priv);
+ ret = wsm_set_template_frame(priv, &frame);
+ priv->scan.direct_probe = 1;
+ if (!ret) {
+ wsm_flush_tx(priv);
+ ret = cw1200_scan_start(priv, &scan);
+ }
+ mutex_unlock(&priv->conf_mutex);
+
+ skb_push(frame.skb, txpriv->offset);
+ if (!ret)
+ IEEE80211_SKB_CB(frame.skb)->flags |= IEEE80211_TX_STAT_ACK;
+ BUG_ON(cw1200_queue_remove(queue, priv->pending_frame_id));
+
+ if (ret) {
+ priv->scan.direct_probe = 0;
+ up(&priv->scan.lock);
+ wsm_unlock_tx(priv);
+ }
+
+ return;
+}
diff --git a/drivers/net/wireless/cw1200/scan.h b/drivers/net/wireless/cw1200/scan.h
new file mode 100644
index 000000000000..5a8296ccfa82
--- /dev/null
+++ b/drivers/net/wireless/cw1200/scan.h
@@ -0,0 +1,56 @@
+/*
+ * Scan interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SCAN_H_INCLUDED
+#define SCAN_H_INCLUDED
+
+#include <linux/semaphore.h>
+#include "wsm.h"
+
+/* external */ struct sk_buff;
+/* external */ struct cfg80211_scan_request;
+/* external */ struct ieee80211_channel;
+/* external */ struct ieee80211_hw;
+/* external */ struct work_struct;
+
+struct cw1200_scan {
+ struct semaphore lock;
+ struct work_struct work;
+ struct delayed_work timeout;
+ struct cfg80211_scan_request *req;
+ struct ieee80211_channel **begin;
+ struct ieee80211_channel **curr;
+ struct ieee80211_channel **end;
+ struct wsm_ssid ssids[WSM_SCAN_MAX_NUM_OF_SSIDS];
+ int output_power;
+ int n_ssids;
+ int status;
+ atomic_t in_progress;
+ /* Direct probe requests workaround */
+ struct delayed_work probe_work;
+ int direct_probe;
+};
+
+int cw1200_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void cw1200_scan_work(struct work_struct *work);
+void cw1200_scan_timeout(struct work_struct *work);
+void cw1200_clear_recent_scan_work(struct work_struct *work);
+void cw1200_scan_complete_cb(struct cw1200_common *priv,
+ struct wsm_scan_complete *arg);
+void cw1200_scan_failed_cb(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* Raw probe requests TX workaround */
+void cw1200_probe_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
new file mode 100644
index 000000000000..4cd0352b508d
--- /dev/null
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -0,0 +1,2404 @@
+/*
+ * Mac80211 STA API for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ + +#include <linux/vmalloc.h> +#include <linux/sched.h> +#include <linux/firmware.h> +#include <linux/module.h> + +#include "cw1200.h" +#include "sta.h" +#include "fwio.h" +#include "bh.h" +#include "debug.h" + +#ifndef ERP_INFO_BYTE_OFFSET +#define ERP_INFO_BYTE_OFFSET 2 +#endif + +static void cw1200_do_join(struct cw1200_common *priv); +static void cw1200_do_unjoin(struct cw1200_common *priv); + +static int cw1200_upload_beacon(struct cw1200_common *priv); +static int cw1200_upload_pspoll(struct cw1200_common *priv); +static int cw1200_upload_null(struct cw1200_common *priv); +static int cw1200_upload_qosnull(struct cw1200_common *priv); +static int cw1200_start_ap(struct cw1200_common *priv); +static int cw1200_update_beaconing(struct cw1200_common *priv); +static int cw1200_enable_beaconing(struct cw1200_common *priv, + bool enable); +static void __cw1200_sta_notify(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + int link_id); +static int __cw1200_flush(struct cw1200_common *priv, bool drop); + +static inline void __cw1200_free_event_queue(struct list_head *list) +{ + struct cw1200_wsm_event *event, *tmp; + list_for_each_entry_safe(event, tmp, list, link) { + list_del(&event->link); + kfree(event); + } +} + +/* ******************************************************************** */ +/* STA API */ + +int cw1200_start(struct ieee80211_hw *dev) +{ + struct cw1200_common *priv = dev->priv; + int ret = 0; + + cw1200_pm_stay_awake(&priv->pm_state, HZ); + + mutex_lock(&priv->conf_mutex); + + /* default EDCA */ + WSM_EDCA_SET(&priv->edca, 0, 0x0002, 0x0003, 0x0007, 47, 0xc8, false); + WSM_EDCA_SET(&priv->edca, 1, 0x0002, 0x0007, 0x000f, 94, 0xc8, false); + WSM_EDCA_SET(&priv->edca, 2, 0x0003, 0x000f, 0x03ff, 0, 0xc8, false); + WSM_EDCA_SET(&priv->edca, 3, 0x0007, 0x000f, 0x03ff, 0, 0xc8, false); + ret = wsm_set_edca_params(priv, &priv->edca); + if (ret) + goto out; + + ret = cw1200_set_uapsd_param(priv, &priv->edca); + if (ret) + goto out; + + priv->setbssparams_done = false; + + memcpy(priv->mac_addr, dev->wiphy->perm_addr, ETH_ALEN); + priv->mode = NL80211_IFTYPE_MONITOR; + priv->wep_default_key_id = -1; + + priv->cqm_beacon_loss_count = 10; + + ret = cw1200_setup_mac(priv); + if (ret) + goto out; + +out: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +void cw1200_stop(struct ieee80211_hw *dev) +{ + struct cw1200_common *priv = dev->priv; + LIST_HEAD(list); + int i; + + wsm_lock_tx(priv); + + while (down_trylock(&priv->scan.lock)) { + /* Scan is in progress. Force it to stop. 
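+ * Clearing scan.req makes cw1200_scan_work() report the scan
+ * as canceled on its next pass.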
+ */
+ priv->scan.req = NULL;
+ schedule();
+ }
+ up(&priv->scan.lock);
+
+ cancel_delayed_work_sync(&priv->scan.probe_work);
+ cancel_delayed_work_sync(&priv->scan.timeout);
+ cancel_delayed_work_sync(&priv->clear_recent_scan_work);
+ cancel_delayed_work_sync(&priv->join_timeout);
+ cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+ cancel_work_sync(&priv->unjoin_work);
+ cancel_delayed_work_sync(&priv->link_id_gc_work);
+ flush_workqueue(priv->workqueue);
+ del_timer_sync(&priv->mcast_timeout);
+ mutex_lock(&priv->conf_mutex);
+ priv->mode = NL80211_IFTYPE_UNSPECIFIED;
+ priv->listening = false;
+
+ spin_lock(&priv->event_queue_lock);
+ list_splice_init(&priv->event_queue, &list);
+ spin_unlock(&priv->event_queue_lock);
+ __cw1200_free_event_queue(&list);
+
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ priv->join_pending = false;
+
+ for (i = 0; i < 4; i++)
+ cw1200_queue_clear(&priv->tx_queue[i]);
+ mutex_unlock(&priv->conf_mutex);
+ tx_policy_clean(priv);
+
+ /* HACK! */
+ if (atomic_xchg(&priv->tx_lock, 1) != 1)
+ pr_debug("[STA] TX is force-unlocked due to stop request.\n");
+
+ wsm_unlock_tx(priv);
+ atomic_xchg(&priv->tx_lock, 0); /* for recovery to work */
+}
+
+static int cw1200_bssloss_mitigation = 1;
+module_param(cw1200_bssloss_mitigation, int, 0644);
+MODULE_PARM_DESC(cw1200_bssloss_mitigation, "BSS Loss mitigation. 0 == disabled, 1 == enabled (default)");
+
+void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
+ int init, int good, int bad)
+{
+ int tx = 0;
+
+ priv->delayed_link_loss = 0;
+ cancel_work_sync(&priv->bss_params_work);
+
+ pr_debug("[STA] CQM BSSLOSS_SM: state: %d init %d good %d bad: %d txlock: %d uj: %d\n",
+ priv->bss_loss_state,
+ init, good, bad,
+ atomic_read(&priv->tx_lock),
+ priv->delayed_unjoin);
+
+ /* If we have a pending unjoin */
+ if (priv->delayed_unjoin)
+ return;
+
+ if (init) {
+ queue_delayed_work(priv->workqueue,
+ &priv->bss_loss_work,
+ HZ);
+ priv->bss_loss_state = 0;
+
+ /* Skip the confirmation procedure in the P2P case */
+ if (!priv->vif->p2p && !atomic_read(&priv->tx_lock))
+ tx = 1;
+ } else if (good) {
+ cancel_delayed_work_sync(&priv->bss_loss_work);
+ priv->bss_loss_state = 0;
+ queue_work(priv->workqueue, &priv->bss_params_work);
+ } else if (bad) {
+ /* XXX Should we just keep going until we time out?
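+ * As written, three unanswered NULL frames are treated as
+ * confirmation of BSS loss.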
*/ + if (priv->bss_loss_state < 3) + tx = 1; + } else { + cancel_delayed_work_sync(&priv->bss_loss_work); + priv->bss_loss_state = 0; + } + + /* Bypass mitigation if it's disabled */ + if (!cw1200_bssloss_mitigation) + tx = 0; + + /* Spit out a NULL packet to our AP if necessary */ + if (tx) { + struct sk_buff *skb; + + priv->bss_loss_state++; + + skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + WARN_ON(!skb); + if (skb) + cw1200_tx(priv->hw, NULL, skb); + } +} + +int cw1200_add_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + int ret; + struct cw1200_common *priv = dev->priv; + /* __le32 auto_calibration_mode = __cpu_to_le32(1); */ + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; + + mutex_lock(&priv->conf_mutex); + + if (priv->mode != NL80211_IFTYPE_MONITOR) { + mutex_unlock(&priv->conf_mutex); + return -EOPNOTSUPP; + } + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_AP: + priv->mode = vif->type; + break; + default: + mutex_unlock(&priv->conf_mutex); + return -EOPNOTSUPP; + } + + priv->vif = vif; + memcpy(priv->mac_addr, vif->addr, ETH_ALEN); + ret = cw1200_setup_mac(priv); + /* Enable auto-calibration */ + /* Exception in subsequent channel switch; disabled. + * wsm_write_mib(priv, WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE, + * &auto_calibration_mode, sizeof(auto_calibration_mode)); + */ + + mutex_unlock(&priv->conf_mutex); + return ret; +} + +void cw1200_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct cw1200_common *priv = dev->priv; + struct wsm_reset reset = { + .reset_statistics = true, + }; + int i; + + mutex_lock(&priv->conf_mutex); + switch (priv->join_status) { + case CW1200_JOIN_STATUS_JOINING: + case CW1200_JOIN_STATUS_PRE_STA: + case CW1200_JOIN_STATUS_STA: + case CW1200_JOIN_STATUS_IBSS: + wsm_lock_tx(priv); + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + break; + case CW1200_JOIN_STATUS_AP: + for (i = 0; priv->link_id_map; ++i) { + if (priv->link_id_map & BIT(i)) { + reset.link_id = i; + wsm_reset(priv, &reset); + priv->link_id_map &= ~BIT(i); + } + } + memset(priv->link_id_db, 0, sizeof(priv->link_id_db)); + priv->sta_asleep_mask = 0; + priv->enable_beacon = false; + priv->tx_multicast = false; + priv->aid0_bit_set = false; + priv->buffered_multicasts = false; + priv->pspoll_mask = 0; + reset.link_id = 0; + wsm_reset(priv, &reset); + break; + case CW1200_JOIN_STATUS_MONITOR: + cw1200_update_listening(priv, false); + break; + default: + break; + } + priv->vif = NULL; + priv->mode = NL80211_IFTYPE_MONITOR; + memset(priv->mac_addr, 0, ETH_ALEN); + memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo)); + cw1200_free_keys(priv); + cw1200_setup_mac(priv); + priv->listening = false; + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + if (!__cw1200_flush(priv, true)) + wsm_unlock_tx(priv); + + mutex_unlock(&priv->conf_mutex); +} + +int cw1200_change_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, + bool p2p) +{ + int ret = 0; + pr_debug("change_interface new: %d (%d), old: %d (%d)\n", new_type, + p2p, vif->type, vif->p2p); + + if (new_type != vif->type || vif->p2p != p2p) { + cw1200_remove_interface(dev, vif); + vif->type = new_type; + vif->p2p = p2p; + ret = cw1200_add_interface(dev, vif); + } + + return ret; +} + +int cw1200_config(struct ieee80211_hw *dev, u32 changed) +{ + int ret = 0; + struct 
cw1200_common *priv = dev->priv;
+	struct ieee80211_conf *conf = &dev->conf;
+
+	pr_debug("CONFIG CHANGED: %08x\n", changed);
+
+	down(&priv->scan.lock);
+	mutex_lock(&priv->conf_mutex);
+	/* TODO: IEEE80211_CONF_CHANGE_QOS */
+	/* TODO: IEEE80211_CONF_CHANGE_LISTEN_INTERVAL */
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		priv->output_power = conf->power_level;
+		pr_debug("[STA] TX power: %d\n", priv->output_power);
+		wsm_set_output_power(priv, priv->output_power * 10);
+	}
+
+	if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) &&
+	    (priv->channel != conf->chandef.chan)) {
+		struct ieee80211_channel *ch = conf->chandef.chan;
+		struct wsm_switch_channel channel = {
+			.channel_number = ch->hw_value,
+		};
+		pr_debug("[STA] Freq %d (wsm ch: %d).\n",
+			 ch->center_freq, ch->hw_value);
+
+		/* __cw1200_flush() implicitly locks tx, if successful */
+		if (!__cw1200_flush(priv, false)) {
+			if (!wsm_switch_channel(priv, &channel)) {
+				ret = wait_event_timeout(priv->channel_switch_done,
+							 !priv->channel_switch_in_progress,
+							 3 * HZ);
+				if (ret) {
+					/* Already unlocks if successful */
+					priv->channel = ch;
+					ret = 0;
+				} else {
+					ret = -ETIMEDOUT;
+				}
+			} else {
+				/* Unlock if switch channel fails */
+				wsm_unlock_tx(priv);
+			}
+		}
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_PS) {
+		if (!(conf->flags & IEEE80211_CONF_PS))
+			priv->powersave_mode.mode = WSM_PSM_ACTIVE;
+		else if (conf->dynamic_ps_timeout <= 0)
+			priv->powersave_mode.mode = WSM_PSM_PS;
+		else
+			priv->powersave_mode.mode = WSM_PSM_FAST_PS;
+
+		/* Firmware requires that the value of this 1-byte field
+		 * be specified in units of 500us. Values above the 128ms
+		 * threshold are not supported.
+		 */
+		if (conf->dynamic_ps_timeout >= 0x80)
+			priv->powersave_mode.fast_psm_idle_period = 0xFF;
+		else
+			priv->powersave_mode.fast_psm_idle_period =
+				conf->dynamic_ps_timeout << 1;
+
+		if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+		    priv->bss_params.aid)
+			cw1200_set_pm(priv, &priv->powersave_mode);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		/* TBD: It looks like it's transparent when a monitor
+		 * interface is present -- use this to determine, for
+		 * example, whether to calculate timestamps for packets
+		 * or not; do not use it instead of the filter flags!
+		 */
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+		struct wsm_operational_mode mode = {
+			.power_mode = cw1200_power_mode,
+			.disable_more_flag_usage = true,
+		};
+
+		wsm_lock_tx(priv);
+		/* Disable p2p-dev mode forced by TX request */
+		if ((priv->join_status == CW1200_JOIN_STATUS_MONITOR) &&
+		    (conf->flags & IEEE80211_CONF_IDLE) &&
+		    !priv->listening) {
+			cw1200_disable_listening(priv);
+			priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+		}
+		wsm_set_operational_mode(priv, &mode);
+		wsm_unlock_tx(priv);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+		pr_debug("[STA] Retry limits: %d (long), %d (short).\n",
+			 conf->long_frame_max_tx_count,
+			 conf->short_frame_max_tx_count);
+		spin_lock_bh(&priv->tx_policy_cache.lock);
+		priv->long_frame_max_tx_count = conf->long_frame_max_tx_count;
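+		/* The clamp below matches the WSM TX policy format: each
+		 * rate gets one 4-bit retry nibble (see tx_policy_build()
+		 * in txrx.c), so 0x0F is the largest count it can encode.
+		 */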
+		priv->short_frame_max_tx_count =
+			(conf->short_frame_max_tx_count < 0x0F) ?
+			conf->short_frame_max_tx_count : 0x0F;
+		priv->hw->max_rate_tries = priv->short_frame_max_tx_count;
+		spin_unlock_bh(&priv->tx_policy_cache.lock);
+	}
+	mutex_unlock(&priv->conf_mutex);
+	up(&priv->scan.lock);
+	return ret;
+}
+
+void cw1200_update_filtering(struct cw1200_common *priv)
+{
+	int ret;
+	bool bssid_filtering = !priv->rx_filter.bssid;
+	bool is_p2p = priv->vif && priv->vif->p2p;
+	bool is_sta = priv->vif && NL80211_IFTYPE_STATION == priv->vif->type;
+
+	static struct wsm_beacon_filter_control bf_ctrl;
+	static struct wsm_mib_beacon_filter_table bf_tbl = {
+		.entry[0].ie_id = WLAN_EID_VENDOR_SPECIFIC,
+		.entry[0].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+					WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+					WSM_BEACON_FILTER_IE_HAS_APPEARED,
+		.entry[0].oui[0] = 0x50,
+		.entry[0].oui[1] = 0x6F,
+		.entry[0].oui[2] = 0x9A,
+		.entry[1].ie_id = WLAN_EID_HT_OPERATION,
+		.entry[1].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+					WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+					WSM_BEACON_FILTER_IE_HAS_APPEARED,
+		.entry[2].ie_id = WLAN_EID_ERP_INFO,
+		.entry[2].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+					WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+					WSM_BEACON_FILTER_IE_HAS_APPEARED,
+	};
+
+	if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE)
+		return;
+	else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
+		bssid_filtering = false;
+
+	if (priv->disable_beacon_filter) {
+		bf_ctrl.enabled = 0;
+		bf_ctrl.bcn_count = 1;
+		bf_tbl.num = __cpu_to_le32(0);
+	} else if (is_p2p || !is_sta) {
+		bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE |
+			WSM_BEACON_FILTER_AUTO_ERP;
+		bf_ctrl.bcn_count = 0;
+		bf_tbl.num = __cpu_to_le32(2);
+	} else {
+		bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE;
+		bf_ctrl.bcn_count = 0;
+		bf_tbl.num = __cpu_to_le32(3);
+	}
+
+	/* When acting as a p2p client connected to a p2p GO, turn off
+	 * the bssid filter in order to receive frames from other p2p
+	 * devices.
+	 *
+	 * WARNING: FW dependency!
+	 * This can only be used with FW WSM371 and its successors.
+	 * In that FW version, even with the bssid filter turned off,
+	 * the device will block most of the unwanted frames.
+	 */
+	if (is_p2p)
+		bssid_filtering = false;
+
+	ret = wsm_set_rx_filter(priv, &priv->rx_filter);
+	if (!ret)
+		ret = wsm_set_beacon_filter_table(priv, &bf_tbl);
+	if (!ret)
+		ret = wsm_beacon_filter_control(priv, &bf_ctrl);
+	if (!ret)
+		ret = wsm_set_bssid_filtering(priv, bssid_filtering);
+	if (!ret)
+		ret = wsm_set_multicast_filter(priv, &priv->multicast_filter);
+	if (ret)
+		wiphy_err(priv->hw->wiphy,
+			  "Update filtering failed: %d.\n", ret);
+	return;
+}
+
+void cw1200_update_filtering_work(struct work_struct *work)
+{
+	struct cw1200_common *priv =
+		container_of(work, struct cw1200_common,
+			     update_filtering_work);
+
+	cw1200_update_filtering(priv);
+}
+
+void cw1200_set_beacon_wakeup_period_work(struct work_struct *work)
+{
+	struct cw1200_common *priv =
+		container_of(work, struct cw1200_common,
+			     set_beacon_wakeup_period_work);
+
+	wsm_set_beacon_wakeup_period(priv,
+				     priv->beacon_int * priv->join_dtim_period >
+				     MAX_BEACON_SKIP_TIME_MS ?
1 : + priv->join_dtim_period, 0); +} + +u64 cw1200_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) +{ + static u8 broadcast_ipv6[ETH_ALEN] = { + 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 + }; + static u8 broadcast_ipv4[ETH_ALEN] = { + 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 + }; + struct cw1200_common *priv = hw->priv; + struct netdev_hw_addr *ha; + int count = 0; + + /* Disable multicast filtering */ + priv->has_multicast_subscription = false; + memset(&priv->multicast_filter, 0x00, sizeof(priv->multicast_filter)); + + if (netdev_hw_addr_list_count(mc_list) > WSM_MAX_GRP_ADDRTABLE_ENTRIES) + return 0; + + /* Enable if requested */ + netdev_hw_addr_list_for_each(ha, mc_list) { + pr_debug("[STA] multicast: %pM\n", ha->addr); + memcpy(&priv->multicast_filter.macaddrs[count], + ha->addr, ETH_ALEN); + if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) && + memcmp(ha->addr, broadcast_ipv6, ETH_ALEN)) + priv->has_multicast_subscription = true; + count++; + } + + if (count) { + priv->multicast_filter.enable = __cpu_to_le32(1); + priv->multicast_filter.num_addrs = __cpu_to_le32(count); + } + + return netdev_hw_addr_list_count(mc_list); +} + +void cw1200_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct cw1200_common *priv = dev->priv; + bool listening = !!(*total_flags & + (FIF_PROMISC_IN_BSS | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC | + FIF_PROBE_REQ)); + + *total_flags &= FIF_PROMISC_IN_BSS | + FIF_OTHER_BSS | + FIF_FCSFAIL | + FIF_BCN_PRBRESP_PROMISC | + FIF_PROBE_REQ; + + down(&priv->scan.lock); + mutex_lock(&priv->conf_mutex); + + priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS) + ? 1 : 0; + priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS | + FIF_PROBE_REQ)) ? 1 : 0; + priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 
1 : 0;
+	priv->disable_beacon_filter = !(*total_flags &
+					(FIF_BCN_PRBRESP_PROMISC |
+					 FIF_PROMISC_IN_BSS |
+					 FIF_PROBE_REQ));
+	if (priv->listening != listening) {
+		priv->listening = listening;
+		wsm_lock_tx(priv);
+		cw1200_update_listening(priv, listening);
+		wsm_unlock_tx(priv);
+	}
+	cw1200_update_filtering(priv);
+	mutex_unlock(&priv->conf_mutex);
+	up(&priv->scan.lock);
+}
+
+int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+		   u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+	struct cw1200_common *priv = dev->priv;
+	int ret = 0;
+	/* To prevent re-applying the PM request OID again and again.
+	 * Note: must be u16, not bool; a bool would truncate the flags
+	 * and make the comparison below unreliable.
+	 */
+	u16 old_uapsd_flags;
+
+	mutex_lock(&priv->conf_mutex);
+
+	if (queue < dev->queues) {
+		old_uapsd_flags = priv->uapsd_info.uapsd_flags;
+
+		WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0);
+		ret = wsm_set_tx_queue_params(priv,
+					      &priv->tx_queue_params.params[queue], queue);
+		if (ret) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		WSM_EDCA_SET(&priv->edca, queue, params->aifs,
+			     params->cw_min, params->cw_max,
+			     params->txop, 0xc8,
+			     params->uapsd);
+		ret = wsm_set_edca_params(priv, &priv->edca);
+		if (ret) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (priv->mode == NL80211_IFTYPE_STATION) {
+			ret = cw1200_set_uapsd_param(priv, &priv->edca);
+			if (!ret && priv->setbssparams_done &&
+			    (priv->join_status == CW1200_JOIN_STATUS_STA) &&
+			    (old_uapsd_flags != priv->uapsd_info.uapsd_flags))
+				ret = cw1200_set_pm(priv, &priv->powersave_mode);
+		}
+	} else {
+		ret = -EINVAL;
+	}
+
+out:
+	mutex_unlock(&priv->conf_mutex);
+	return ret;
+}
+
+int cw1200_get_stats(struct ieee80211_hw *dev,
+		     struct ieee80211_low_level_stats *stats)
+{
+	struct cw1200_common *priv = dev->priv;
+
+	memcpy(stats, &priv->stats, sizeof(*stats));
+	return 0;
+}
+
+int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg)
+{
+	struct wsm_set_pm pm = *arg;
+
+	if (priv->uapsd_info.uapsd_flags != 0)
+		pm.mode &= ~WSM_PSM_FAST_PS_FLAG;
+
+	if (memcmp(&pm, &priv->firmware_ps_mode,
+		   sizeof(struct wsm_set_pm))) {
+		priv->firmware_ps_mode = pm;
+		return wsm_set_pm(priv, &pm);
+	} else {
+		return 0;
+	}
+}
+
+int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
+		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		   struct ieee80211_key_conf *key)
+{
+	int ret = -EOPNOTSUPP;
+	struct cw1200_common *priv = dev->priv;
+	struct ieee80211_key_seq seq;
+
+	mutex_lock(&priv->conf_mutex);
+
+	if (cmd == SET_KEY) {
+		u8 *peer_addr = NULL;
+		int pairwise = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) ?
+			1 : 0;
+		int idx = cw1200_alloc_key(priv);
+		struct wsm_add_key *wsm_key;
+
+		if (idx < 0) {
+			ret = -EINVAL;
+			goto finally;
+		}
+
+		/* Only take the key slot once we know idx is valid */
+		wsm_key = &priv->keys[idx];
+
+		if (sta)
+			peer_addr = sta->addr;
+
+		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+
+		switch (key->cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			if (key->keylen > 16) {
+				cw1200_free_key(priv, idx);
+				ret = -EINVAL;
+				goto finally;
+			}
+
+			if (pairwise) {
+				wsm_key->type = WSM_KEY_TYPE_WEP_PAIRWISE;
+				memcpy(wsm_key->wep_pairwise.peer,
+				       peer_addr, ETH_ALEN);
+				memcpy(wsm_key->wep_pairwise.keydata,
+				       &key->key[0], key->keylen);
+				wsm_key->wep_pairwise.keylen = key->keylen;
+			} else {
+				wsm_key->type = WSM_KEY_TYPE_WEP_DEFAULT;
+				memcpy(wsm_key->wep_group.keydata,
+				       &key->key[0], key->keylen);
+				wsm_key->wep_group.keylen = key->keylen;
+				wsm_key->wep_group.keyid = key->keyidx;
+			}
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			ieee80211_get_key_rx_seq(key, 0, &seq);
+			if (pairwise) {
+				wsm_key->type = WSM_KEY_TYPE_TKIP_PAIRWISE;
+				memcpy(wsm_key->tkip_pairwise.peer,
+				       peer_addr, ETH_ALEN);
+				memcpy(wsm_key->tkip_pairwise.keydata,
+				       &key->key[0], 16);
+				memcpy(wsm_key->tkip_pairwise.tx_mic_key,
+				       &key->key[16], 8);
+				memcpy(wsm_key->tkip_pairwise.rx_mic_key,
+				       &key->key[24], 8);
+			} else {
+				size_t mic_offset =
+					(priv->mode == NL80211_IFTYPE_AP) ?
+					16 : 24;
+				wsm_key->type = WSM_KEY_TYPE_TKIP_GROUP;
+				memcpy(wsm_key->tkip_group.keydata,
+				       &key->key[0], 16);
+				memcpy(wsm_key->tkip_group.rx_mic_key,
+				       &key->key[mic_offset], 8);
+
+				wsm_key->tkip_group.rx_seqnum[0] = seq.tkip.iv16 & 0xff;
+				wsm_key->tkip_group.rx_seqnum[1] = (seq.tkip.iv16 >> 8) & 0xff;
+				wsm_key->tkip_group.rx_seqnum[2] = seq.tkip.iv32 & 0xff;
+				wsm_key->tkip_group.rx_seqnum[3] = (seq.tkip.iv32 >> 8) & 0xff;
+				wsm_key->tkip_group.rx_seqnum[4] = (seq.tkip.iv32 >> 16) & 0xff;
+				wsm_key->tkip_group.rx_seqnum[5] = (seq.tkip.iv32 >> 24) & 0xff;
+				wsm_key->tkip_group.rx_seqnum[6] = 0;
+				wsm_key->tkip_group.rx_seqnum[7] = 0;
+
+				wsm_key->tkip_group.keyid = key->keyidx;
+			}
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			ieee80211_get_key_rx_seq(key, 0, &seq);
+			if (pairwise) {
+				wsm_key->type = WSM_KEY_TYPE_AES_PAIRWISE;
+				memcpy(wsm_key->aes_pairwise.peer,
+				       peer_addr, ETH_ALEN);
+				memcpy(wsm_key->aes_pairwise.keydata,
+				       &key->key[0], 16);
+			} else {
+				wsm_key->type = WSM_KEY_TYPE_AES_GROUP;
+				memcpy(wsm_key->aes_group.keydata,
+				       &key->key[0], 16);
+
+				wsm_key->aes_group.rx_seqnum[0] = seq.ccmp.pn[5];
+				wsm_key->aes_group.rx_seqnum[1] = seq.ccmp.pn[4];
+				wsm_key->aes_group.rx_seqnum[2] = seq.ccmp.pn[3];
+				wsm_key->aes_group.rx_seqnum[3] = seq.ccmp.pn[2];
+				wsm_key->aes_group.rx_seqnum[4] = seq.ccmp.pn[1];
+				wsm_key->aes_group.rx_seqnum[5] = seq.ccmp.pn[0];
+				wsm_key->aes_group.rx_seqnum[6] = 0;
+				wsm_key->aes_group.rx_seqnum[7] = 0;
+				wsm_key->aes_group.keyid = key->keyidx;
+			}
+			break;
+		case WLAN_CIPHER_SUITE_SMS4:
+			if (pairwise) {
+				wsm_key->type = WSM_KEY_TYPE_WAPI_PAIRWISE;
+				memcpy(wsm_key->wapi_pairwise.peer,
+				       peer_addr, ETH_ALEN);
+				memcpy(wsm_key->wapi_pairwise.keydata,
+				       &key->key[0], 16);
+				memcpy(wsm_key->wapi_pairwise.mic_key,
+				       &key->key[16], 16);
+				wsm_key->wapi_pairwise.keyid = key->keyidx;
+			} else {
+				wsm_key->type = WSM_KEY_TYPE_WAPI_GROUP;
+				memcpy(wsm_key->wapi_group.keydata,
+				       &key->key[0], 16);
+				memcpy(wsm_key->wapi_group.mic_key,
+				       &key->key[16], 16);
+				wsm_key->wapi_group.keyid = key->keyidx;
+			}
+			break;
+		default:
+			pr_warn("Unhandled key type %d\n", key->cipher);
+			cw1200_free_key(priv, idx);
+			ret = -EOPNOTSUPP;
+			goto finally;
+		}
+		ret = wsm_add_key(priv, wsm_key);
+		if (!ret)
+			key->hw_key_idx = idx;
+		else
+			cw1200_free_key(priv, idx);
+	} else if (cmd == DISABLE_KEY) {
+		struct wsm_remove_key wsm_key = {
+			.index = key->hw_key_idx,
+		};
+
+		if (wsm_key.index > WSM_KEY_MAX_INDEX) {
+			ret = -EINVAL;
+			goto finally;
+		}
+
+		cw1200_free_key(priv, wsm_key.index);
+		ret = wsm_remove_key(priv, &wsm_key);
+	} else {
+		pr_warn("Unhandled key command %d\n", cmd);
+	}
+
+finally:
+	mutex_unlock(&priv->conf_mutex);
+	return ret;
+}
+
+void cw1200_wep_key_work(struct work_struct *work)
+{
+	struct cw1200_common *priv =
+		container_of(work, struct cw1200_common, wep_key_work);
+	u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id);
+	struct cw1200_queue *queue = &priv->tx_queue[queue_id];
+	__le32 wep_default_key_id = __cpu_to_le32(
+		priv->wep_default_key_id);
+
+	pr_debug("[STA] Setting default WEP key: %d\n",
+		 priv->wep_default_key_id);
+	wsm_flush_tx(priv);
+	wsm_write_mib(priv, WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
+		      &wep_default_key_id, sizeof(wep_default_key_id));
+	cw1200_queue_requeue(queue, priv->pending_frame_id);
+	wsm_unlock_tx(priv);
+}
+
+int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	int ret = 0;
+	__le32 val32;
+	struct cw1200_common *priv = hw->priv;
+
+	if (value != (u32) -1)
+		val32 = __cpu_to_le32(value);
+	else
+		val32 = 0; /* disabled */
+
+	if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
+		/* device is down, can _not_ set threshold */
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (priv->rts_threshold == value)
+		goto out;
+
+	pr_debug("[STA] Setting RTS threshold: %d\n", value);
+
+	/* mutex_lock(&priv->conf_mutex); */
+	ret = wsm_write_mib(priv, WSM_MIB_ID_DOT11_RTS_THRESHOLD,
+			    &val32, sizeof(val32));
+	if (!ret)
+		priv->rts_threshold = value;
+	/* mutex_unlock(&priv->conf_mutex); */
+
+out:
+	return ret;
+}
+
+/* If successful, LOCKS the TX queue! */
+static int __cw1200_flush(struct cw1200_common *priv, bool drop)
+{
+	int i, ret;
+
+	for (;;) {
+		/* TODO: correct flush handling is required for dev_stop.
+		 * Temporary workaround: 2s timeout.
+		 */
+		if (drop) {
+			for (i = 0; i < 4; ++i)
+				cw1200_queue_clear(&priv->tx_queue[i]);
+		} else {
+			ret = wait_event_timeout(
+				priv->tx_queue_stats.wait_link_id_empty,
+				cw1200_queue_stats_is_empty(
+					&priv->tx_queue_stats, -1),
+				2 * HZ);
+		}
+
+		if (!drop && ret <= 0) {
+			ret = -ETIMEDOUT;
+			break;
+		} else {
+			ret = 0;
+		}
+
+		wsm_lock_tx(priv);
+		if (!cw1200_queue_stats_is_empty(&priv->tx_queue_stats, -1)) {
+			/* Highly unlikely: WSM requeued frames.
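+			 * A TX confirmation may have put a frame back on a
+			 * queue between the emptiness check above and
+			 * wsm_lock_tx(), so drop the lock and wait again.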
*/ + wsm_unlock_tx(priv); + continue; + } + break; + } + return ret; +} + +void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +{ + struct cw1200_common *priv = hw->priv; + + switch (priv->mode) { + case NL80211_IFTYPE_MONITOR: + drop = true; + break; + case NL80211_IFTYPE_AP: + if (!priv->enable_beacon) + drop = true; + break; + } + + if (!__cw1200_flush(priv, drop)) + wsm_unlock_tx(priv); + + return; +} + +/* ******************************************************************** */ +/* WSM callbacks */ + +void cw1200_free_event_queue(struct cw1200_common *priv) +{ + LIST_HEAD(list); + + spin_lock(&priv->event_queue_lock); + list_splice_init(&priv->event_queue, &list); + spin_unlock(&priv->event_queue_lock); + + __cw1200_free_event_queue(&list); +} + +void cw1200_event_handler(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, event_handler); + struct cw1200_wsm_event *event; + LIST_HEAD(list); + + spin_lock(&priv->event_queue_lock); + list_splice_init(&priv->event_queue, &list); + spin_unlock(&priv->event_queue_lock); + + list_for_each_entry(event, &list, link) { + switch (event->evt.id) { + case WSM_EVENT_ERROR: + pr_err("Unhandled WSM Error from LMAC\n"); + break; + case WSM_EVENT_BSS_LOST: + pr_debug("[CQM] BSS lost.\n"); + cancel_work_sync(&priv->unjoin_work); + if (!down_trylock(&priv->scan.lock)) { + cw1200_cqm_bssloss_sm(priv, 1, 0, 0); + up(&priv->scan.lock); + } else { + /* Scan is in progress. Delay reporting. + * Scan complete will trigger bss_loss_work + */ + priv->delayed_link_loss = 1; + /* Also start a watchdog. */ + queue_delayed_work(priv->workqueue, + &priv->bss_loss_work, 5*HZ); + } + break; + case WSM_EVENT_BSS_REGAINED: + pr_debug("[CQM] BSS regained.\n"); + cw1200_cqm_bssloss_sm(priv, 0, 0, 0); + cancel_work_sync(&priv->unjoin_work); + break; + case WSM_EVENT_RADAR_DETECTED: + wiphy_info(priv->hw->wiphy, "radar pulse detected\n"); + break; + case WSM_EVENT_RCPI_RSSI: + { + /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 + * RSSI = RCPI / 2 - 110 + */ + int rcpi_rssi = (int)(event->evt.data & 0xFF); + int cqm_evt; + if (priv->cqm_use_rssi) + rcpi_rssi = (s8)rcpi_rssi; + else + rcpi_rssi = rcpi_rssi / 2 - 110; + + cqm_evt = (rcpi_rssi <= priv->cqm_rssi_thold) ? 
+				NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
+				NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+			pr_debug("[CQM] RSSI event: %d.\n", rcpi_rssi);
+			ieee80211_cqm_rssi_notify(priv->vif, cqm_evt,
+						  GFP_KERNEL);
+			break;
+		}
+		case WSM_EVENT_BT_INACTIVE:
+			pr_warn("Unhandled BT INACTIVE from LMAC\n");
+			break;
+		case WSM_EVENT_BT_ACTIVE:
+			pr_warn("Unhandled BT ACTIVE from LMAC\n");
+			break;
+		}
+	}
+	__cw1200_free_event_queue(&list);
+}
+
+void cw1200_bss_loss_work(struct work_struct *work)
+{
+	struct cw1200_common *priv =
+		container_of(work, struct cw1200_common, bss_loss_work.work);
+
+	pr_debug("[CQM] Reporting connection loss.\n");
+	wsm_lock_tx(priv);
+	if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+		wsm_unlock_tx(priv);
+}
+
+void cw1200_bss_params_work(struct work_struct *work)
+{
+	struct cw1200_common *priv =
+		container_of(work, struct cw1200_common, bss_params_work);
+	mutex_lock(&priv->conf_mutex);
+
+	priv->bss_params.reset_beacon_loss = 1;
+	wsm_set_bss_params(priv, &priv->bss_params);
+	priv->bss_params.reset_beacon_loss = 0;
+
+	mutex_unlock(&priv->conf_mutex);
+}
+
+/* ******************************************************************** */
+/* Internal API */
+
+/* This function parses the SDD file to extract the listen interval and
+ * PTA-related information.
+ * The SDD is a sequence of TLVs: u8 id, u8 len, u8 data[]
+ */
+static int cw1200_parse_sdd_file(struct cw1200_common *priv)
+{
+	const u8 *p = priv->sdd->data;
+	int ret = 0;
+
+	while (p + 2 <= priv->sdd->data + priv->sdd->size) {
+		if (p + p[1] + 2 > priv->sdd->data + priv->sdd->size) {
+			pr_warn("Malformed sdd structure\n");
+			return -1;
+		}
+		switch (p[0]) {
+		case SDD_PTA_CFG_ELT_ID: {
+			u16 v;
+			if (p[1] < 4) {
+				pr_warn("SDD_PTA_CFG_ELT_ID malformed\n");
+				ret = -1;
+				break;
+			}
+			v = le16_to_cpu(*((u16 *)(p + 2)));
+			if (!v) /* non-zero means this is enabled */
+				break;
+
+			v = le16_to_cpu(*((u16 *)(p + 4)));
+			priv->conf_listen_interval = (v >> 7) & 0x1F;
+			pr_debug("PTA found; Listen Interval %d\n",
+				 priv->conf_listen_interval);
+			break;
+		}
+		case SDD_REFERENCE_FREQUENCY_ELT_ID: {
+			u16 clk = le16_to_cpu(*((u16 *)(p + 2)));
+			if (clk != priv->hw_refclk)
+				pr_warn("SDD file doesn't match configured refclk (%d vs %d)\n",
+					clk, priv->hw_refclk);
+			break;
+		}
+		default:
+			break;
+		}
+		p += p[1] + 2;
+	}
+
+	if (!priv->bt_present) {
+		pr_debug("PTA element NOT found.\n");
+		priv->conf_listen_interval = 0;
+	}
+	return ret;
+}
+
+int cw1200_setup_mac(struct cw1200_common *priv)
+{
+	int ret = 0;
+
+	/* NOTE: There is a bug in FW: it reports signal
+	 * as RSSI if RSSI subscription is enabled.
+	 * It's not enough to set WSM_RCPI_RSSI_USE_RSSI.
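+	 * The driver therefore remembers which mode it enabled in
+	 * priv->cqm_use_rssi and interprets WSM_EVENT_RCPI_RSSI
+	 * payloads accordingly (see cw1200_event_handler()).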
+	 *
+	 * NOTE2: RSSI-based reports have been switched to RCPI, since
+	 * the FW has a bug and the reported RSSI values are not stable,
+	 * which can lead to signal level oscillations in user-end
+	 * applications.
+	 */
+	struct wsm_rcpi_rssi_threshold threshold = {
+		.rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE |
+		WSM_RCPI_RSSI_DONT_USE_UPPER |
+		WSM_RCPI_RSSI_DONT_USE_LOWER,
+		.rollingAverageCount = 16,
+	};
+
+	struct wsm_configuration cfg = {
+		.dot11StationId = &priv->mac_addr[0],
+	};
+
+	/* Remember the decision here to make sure we will handle
+	 * the RCPI/RSSI value correctly on WSM_EVENT_RCPI_RSSI
+	 */
+	if (threshold.rssiRcpiMode & WSM_RCPI_RSSI_USE_RSSI)
+		priv->cqm_use_rssi = true;
+
+	if (!priv->sdd) {
+		ret = request_firmware(&priv->sdd, priv->sdd_path, priv->pdev);
+		if (ret) {
+			pr_err("Can't load sdd file %s.\n", priv->sdd_path);
+			return ret;
+		}
+		cw1200_parse_sdd_file(priv);
+	}
+
+	cfg.dpdData = priv->sdd->data;
+	cfg.dpdData_size = priv->sdd->size;
+	ret = wsm_configuration(priv, &cfg);
+	if (ret)
+		return ret;
+
+	/* Configure RCPI/RSSI reporting. */
+	wsm_set_rcpi_rssi_threshold(priv, &threshold);
+
+	return 0;
+}
+
+static void cw1200_join_complete(struct cw1200_common *priv)
+{
+	pr_debug("[STA] Join complete (%d)\n", priv->join_complete_status);
+
+	priv->join_pending = false;
+	if (priv->join_complete_status) {
+		priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+		cw1200_update_listening(priv, priv->listening);
+		cw1200_do_unjoin(priv);
+		ieee80211_connection_loss(priv->vif);
+	} else {
+		if (priv->mode == NL80211_IFTYPE_ADHOC)
+			priv->join_status = CW1200_JOIN_STATUS_IBSS;
+		else
+			priv->join_status = CW1200_JOIN_STATUS_PRE_STA;
+	}
+	wsm_unlock_tx(priv); /* Clearing the lock held before do_join() */
+}
+
+void cw1200_join_complete_work(struct work_struct *work)
+{
+	struct cw1200_common *priv =
+		container_of(work, struct cw1200_common, join_complete_work);
+	mutex_lock(&priv->conf_mutex);
+	cw1200_join_complete(priv);
+	mutex_unlock(&priv->conf_mutex);
+}
+
+void cw1200_join_complete_cb(struct cw1200_common *priv,
+			     struct wsm_join_complete *arg)
+{
+	pr_debug("[STA] cw1200_join_complete_cb called, status=%d.\n",
+		 arg->status);
+
+	if (cancel_delayed_work(&priv->join_timeout)) {
+		priv->join_complete_status = arg->status;
+		queue_work(priv->workqueue, &priv->join_complete_work);
+	}
+}
+
+/* MUST be called with tx_lock held! It will be unlocked for us. */
+static void cw1200_do_join(struct cw1200_common *priv)
+{
+	const u8 *bssid;
+	struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
+	struct cfg80211_bss *bss = NULL;
+	struct wsm_protected_mgmt_policy mgmt_policy;
+	struct wsm_join join = {
+		.mode = conf->ibss_joined ?
+				WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS,
+		.preamble_type = WSM_JOIN_PREAMBLE_LONG,
+		.probe_for_join = 1,
+		.atim_window = 0,
+		.basic_rate_set = cw1200_rate_mask_to_wsm(priv,
+							  conf->basic_rates),
+	};
+	if (delayed_work_pending(&priv->join_timeout)) {
+		pr_warn("[STA] - Join request already pending, skipping...\n");
+		wsm_unlock_tx(priv);
+		return;
+	}
+
+	if (priv->join_status)
+		cw1200_do_unjoin(priv);
+
+	bssid = priv->vif->bss_conf.bssid;
+
+	bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel,
+			       bssid, NULL, 0, 0, 0);
+
+	if (!bss && !conf->ibss_joined) {
+		wsm_unlock_tx(priv);
+		return;
+	}
+
+	mutex_lock(&priv->conf_mutex);
+
+	/* Under the conf lock: check scan status and
+	 * bail out if it is in progress.
+	 */
+	if (atomic_read(&priv->scan.in_progress)) {
+		wsm_unlock_tx(priv);
+		goto done_put;
+	}
+
+	priv->join_pending = true;
+
+	/* Sanity check basic rates */
+	if (!join.basic_rate_set)
+		join.basic_rate_set = 7;
+
+	/* Sanity check beacon interval */
+	if (!priv->beacon_int)
+		priv->beacon_int = 1;
+
+	join.beacon_interval = priv->beacon_int;
+
+	/* BT Coex related changes */
+	if (priv->bt_present) {
+		if (((priv->conf_listen_interval * 100) %
+		     priv->beacon_int) == 0)
+			priv->listen_interval =
+				((priv->conf_listen_interval * 100) /
+				 priv->beacon_int);
+		else
+			priv->listen_interval =
+				((priv->conf_listen_interval * 100) /
+				 priv->beacon_int + 1);
+	}
+
+	if (priv->hw->conf.ps_dtim_period)
+		priv->join_dtim_period = priv->hw->conf.ps_dtim_period;
+	join.dtim_period = priv->join_dtim_period;
+
+	join.channel_number = priv->channel->hw_value;
+	join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+		WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
+
+	memcpy(join.bssid, bssid, sizeof(join.bssid));
+
+	pr_debug("[STA] Join BSSID: %pM DTIM: %d, interval: %d\n",
+		 join.bssid,
+		 join.dtim_period, priv->beacon_int);
+
+	if (!conf->ibss_joined) {
+		const u8 *ssidie;
+		rcu_read_lock();
+		ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+		if (ssidie) {
+			join.ssid_len = ssidie[1];
+			memcpy(join.ssid, &ssidie[2], join.ssid_len);
+		}
+		rcu_read_unlock();
+	}
+
+	if (priv->vif->p2p) {
+		join.flags |= WSM_JOIN_FLAGS_P2P_GO;
+		join.basic_rate_set =
+			cw1200_rate_mask_to_wsm(priv, 0xFF0);
+	}
+
+	/* Enable asynchronous join calls */
+	if (!conf->ibss_joined) {
+		join.flags |= WSM_JOIN_FLAGS_FORCE;
+		join.flags |= WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND;
+	}
+
+	wsm_flush_tx(priv);
+
+	/* Stay awake for the join and auth timeouts, and a bit more */
+	cw1200_pm_stay_awake(&priv->pm_state,
+			     CW1200_JOIN_TIMEOUT + CW1200_AUTH_TIMEOUT);
+
+	cw1200_update_listening(priv, false);
+
+	/* Turn on Block ACKs */
+	wsm_set_block_ack_policy(priv, priv->ba_tx_tid_mask,
+				 priv->ba_rx_tid_mask);
+
+	/* Set up timeout */
+	if (join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND) {
+		priv->join_status = CW1200_JOIN_STATUS_JOINING;
+		queue_delayed_work(priv->workqueue,
+				   &priv->join_timeout,
+				   CW1200_JOIN_TIMEOUT);
+	}
+
+	/* 802.11w protected mgmt frames */
+	mgmt_policy.protectedMgmtEnable = 0;
+	mgmt_policy.unprotectedMgmtFramesAllowed = 1;
+	mgmt_policy.encryptionForAuthFrame = 1;
+	wsm_set_protected_mgmt_policy(priv, &mgmt_policy);
+
+	/* Perform actual join */
+	if (wsm_join(priv, &join)) {
+		pr_err("[STA] cw1200_do_join: wsm_join failed!\n");
+		cancel_delayed_work_sync(&priv->join_timeout);
+		cw1200_update_listening(priv, priv->listening);
+		/* Tx lock still held, unjoin will clear it. */
+		if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+			wsm_unlock_tx(priv);
+	} else {
+		if (!(join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND))
+			cw1200_join_complete(priv); /* Will clear tx_lock */
+
+		/* Upload keys */
+		cw1200_upload_keys(priv);
+
+		/* Due to beacon filtering it is possible that the
+		 * AP's beacon is not known to the mac80211 stack.
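+		 * (mac80211 tracks the AP parameters from the beacons
+		 * it actually receives.)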
+		 * Disable filtering temporarily to make sure the stack
+		 * receives at least one beacon.
+		 */
+		priv->disable_beacon_filter = true;
+	}
+	cw1200_update_filtering(priv);
+
+done_put:
+	mutex_unlock(&priv->conf_mutex);
+	if (bss)
+		cfg80211_put_bss(priv->hw->wiphy, bss);
+}
+
+void cw1200_join_timeout(struct work_struct *work)
+{
+	struct cw1200_common *priv =
+		container_of(work, struct cw1200_common, join_timeout.work);
+	pr_debug("[WSM] Join timed out.\n");
+	wsm_lock_tx(priv);
+	if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+		wsm_unlock_tx(priv);
+}
+
+static void cw1200_do_unjoin(struct cw1200_common *priv)
+{
+	struct wsm_reset reset = {
+		.reset_statistics = true,
+	};
+
+	cancel_delayed_work_sync(&priv->join_timeout);
+
+	mutex_lock(&priv->conf_mutex);
+	priv->join_pending = false;
+
+	if (atomic_read(&priv->scan.in_progress)) {
+		if (priv->delayed_unjoin)
+			wiphy_dbg(priv->hw->wiphy, "Delayed unjoin is already scheduled.\n");
+		else
+			priv->delayed_unjoin = true;
+		goto done;
+	}
+
+	priv->delayed_link_loss = false;
+
+	if (!priv->join_status)
+		goto done;
+
+	if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
+		wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
+			  priv->join_status);
+		BUG_ON(1);
+	}
+
+	cancel_work_sync(&priv->update_filtering_work);
+	cancel_work_sync(&priv->set_beacon_wakeup_period_work);
+	priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+
+	/* Unjoin is a reset. */
+	wsm_flush_tx(priv);
+	wsm_keep_alive_period(priv, 0);
+	wsm_reset(priv, &reset);
+	wsm_set_output_power(priv, priv->output_power * 10);
+	priv->join_dtim_period = 0;
+	cw1200_setup_mac(priv);
+	cw1200_free_event_queue(priv);
+	cancel_work_sync(&priv->event_handler);
+	cw1200_update_listening(priv, priv->listening);
+	cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+
+	/* Disable Block ACKs */
+	wsm_set_block_ack_policy(priv, 0, 0);
+
+	priv->disable_beacon_filter = false;
+	cw1200_update_filtering(priv);
+	memset(&priv->association_mode, 0,
+	       sizeof(priv->association_mode));
+	memset(&priv->bss_params, 0, sizeof(priv->bss_params));
+	priv->setbssparams_done = false;
+	memset(&priv->firmware_ps_mode, 0,
+	       sizeof(priv->firmware_ps_mode));
+
+	pr_debug("[STA] Unjoin completed.\n");
+
+done:
+	mutex_unlock(&priv->conf_mutex);
+}
+
+void cw1200_unjoin_work(struct work_struct *work)
+{
+	struct cw1200_common *priv =
+		container_of(work, struct cw1200_common, unjoin_work);
+
+	cw1200_do_unjoin(priv);
+
+	/* Tell the stack we're dead */
+	ieee80211_connection_loss(priv->vif);
+
+	wsm_unlock_tx(priv);
+}
+
+int cw1200_enable_listening(struct cw1200_common *priv)
+{
+	struct wsm_start start = {
+		.mode = WSM_START_MODE_P2P_DEV,
+		.band = WSM_PHY_BAND_2_4G,
+		.beacon_interval = 100,
+		.dtim_period = 1,
+		.probe_delay = 0,
+		.basic_rate_set = 0x0F,
+	};
+
+	if (priv->channel) {
+		start.band = priv->channel->band == IEEE80211_BAND_5GHZ ?
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; + start.channel_number = priv->channel->hw_value; + } else { + start.band = WSM_PHY_BAND_2_4G; + start.channel_number = 1; + } + + return wsm_start(priv, &start); +} + +int cw1200_disable_listening(struct cw1200_common *priv) +{ + int ret; + struct wsm_reset reset = { + .reset_statistics = true, + }; + ret = wsm_reset(priv, &reset); + return ret; +} + +void cw1200_update_listening(struct cw1200_common *priv, bool enabled) +{ + if (enabled) { + if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE) { + if (!cw1200_enable_listening(priv)) + priv->join_status = CW1200_JOIN_STATUS_MONITOR; + wsm_set_probe_responder(priv, true); + } + } else { + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { + if (!cw1200_disable_listening(priv)) + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + wsm_set_probe_responder(priv, false); + } + } +} + +int cw1200_set_uapsd_param(struct cw1200_common *priv, + const struct wsm_edca_params *arg) +{ + int ret; + u16 uapsd_flags = 0; + + /* Here's the mapping AC [queue, bit] + * VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0] + */ + + if (arg->uapsd_enable[0]) + uapsd_flags |= 1 << 3; + + if (arg->uapsd_enable[1]) + uapsd_flags |= 1 << 2; + + if (arg->uapsd_enable[2]) + uapsd_flags |= 1 << 1; + + if (arg->uapsd_enable[3]) + uapsd_flags |= 1; + + /* Currently pseudo U-APSD operation is not supported, so setting + * MinAutoTriggerInterval, MaxAutoTriggerInterval and + * AutoTriggerStep to 0 + */ + + priv->uapsd_info.uapsd_flags = cpu_to_le16(uapsd_flags); + priv->uapsd_info.min_auto_trigger_interval = 0; + priv->uapsd_info.max_auto_trigger_interval = 0; + priv->uapsd_info.auto_trigger_step = 0; + + ret = wsm_set_uapsd_info(priv, &priv->uapsd_info); + return ret; +} + +/* ******************************************************************** */ +/* AP API */ + +int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct cw1200_common *priv = hw->priv; + struct cw1200_sta_priv *sta_priv = + (struct cw1200_sta_priv *)&sta->drv_priv; + struct cw1200_link_entry *entry; + struct sk_buff *skb; + + if (priv->mode != NL80211_IFTYPE_AP) + return 0; + + sta_priv->link_id = cw1200_find_link_id(priv, sta->addr); + if (WARN_ON(!sta_priv->link_id)) { + wiphy_info(priv->hw->wiphy, + "[AP] No more link IDs available.\n"); + return -ENOENT; + } + + entry = &priv->link_id_db[sta_priv->link_id - 1]; + spin_lock_bh(&priv->ps_state_lock); + if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) == + IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) + priv->sta_asleep_mask |= BIT(sta_priv->link_id); + entry->status = CW1200_LINK_HARD; + while ((skb = skb_dequeue(&entry->rx_queue))) + ieee80211_rx_irqsafe(priv->hw, skb); + spin_unlock_bh(&priv->ps_state_lock); + return 0; +} + +int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct cw1200_common *priv = hw->priv; + struct cw1200_sta_priv *sta_priv = + (struct cw1200_sta_priv *)&sta->drv_priv; + struct cw1200_link_entry *entry; + + if (priv->mode != NL80211_IFTYPE_AP || !sta_priv->link_id) + return 0; + + entry = &priv->link_id_db[sta_priv->link_id - 1]; + spin_lock_bh(&priv->ps_state_lock); + entry->status = CW1200_LINK_RESERVE; + entry->timestamp = jiffies; + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + spin_unlock_bh(&priv->ps_state_lock); + flush_workqueue(priv->workqueue); + return 0; +} + +static void __cw1200_sta_notify(struct 
ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + int link_id) +{ + struct cw1200_common *priv = dev->priv; + u32 bit, prev; + + /* Zero link id means "for all link IDs" */ + if (link_id) + bit = BIT(link_id); + else if (WARN_ON_ONCE(notify_cmd != STA_NOTIFY_AWAKE)) + bit = 0; + else + bit = priv->link_id_map; + prev = priv->sta_asleep_mask & bit; + + switch (notify_cmd) { + case STA_NOTIFY_SLEEP: + if (!prev) { + if (priv->buffered_multicasts && + !priv->sta_asleep_mask) + queue_work(priv->workqueue, + &priv->multicast_start_work); + priv->sta_asleep_mask |= bit; + } + break; + case STA_NOTIFY_AWAKE: + if (prev) { + priv->sta_asleep_mask &= ~bit; + priv->pspoll_mask &= ~bit; + if (priv->tx_multicast && link_id && + !priv->sta_asleep_mask) + queue_work(priv->workqueue, + &priv->multicast_stop_work); + cw1200_bh_wakeup(priv); + } + break; + } +} + +void cw1200_sta_notify(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + struct ieee80211_sta *sta) +{ + struct cw1200_common *priv = dev->priv; + struct cw1200_sta_priv *sta_priv = + (struct cw1200_sta_priv *)&sta->drv_priv; + + spin_lock_bh(&priv->ps_state_lock); + __cw1200_sta_notify(dev, vif, notify_cmd, sta_priv->link_id); + spin_unlock_bh(&priv->ps_state_lock); +} + +static void cw1200_ps_notify(struct cw1200_common *priv, + int link_id, bool ps) +{ + if (link_id > CW1200_MAX_STA_IN_AP_MODE) + return; + + pr_debug("%s for LinkId: %d. STAs asleep: %.8X\n", + ps ? "Stop" : "Start", + link_id, priv->sta_asleep_mask); + + __cw1200_sta_notify(priv->hw, priv->vif, + ps ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, link_id); +} + +static int cw1200_set_tim_impl(struct cw1200_common *priv, bool aid0_bit_set) +{ + struct sk_buff *skb; + struct wsm_update_ie update_ie = { + .what = WSM_UPDATE_IE_BEACON, + .count = 1, + }; + u16 tim_offset, tim_length; + + pr_debug("[AP] mcast: %s.\n", aid0_bit_set ? "ena" : "dis"); + + skb = ieee80211_beacon_get_tim(priv->hw, priv->vif, + &tim_offset, &tim_length); + if (!skb) { + if (!__cw1200_flush(priv, true)) + wsm_unlock_tx(priv); + return -ENOENT; + } + + if (tim_offset && tim_length >= 6) { + /* Ignore DTIM count from mac80211: + * firmware handles DTIM internally. + */ + skb->data[tim_offset + 2] = 0; + + /* Set/reset aid0 bit */ + if (aid0_bit_set) + skb->data[tim_offset + 4] |= 1; + else + skb->data[tim_offset + 4] &= ~1; + } + + update_ie.ies = &skb->data[tim_offset]; + update_ie.length = tim_length; + wsm_update_ie(priv, &update_ie); + + dev_kfree_skb(skb); + + return 0; +} + +void cw1200_set_tim_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, set_tim_work); + (void)cw1200_set_tim_impl(priv, priv->aid0_bit_set); +} + +int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta, + bool set) +{ + struct cw1200_common *priv = dev->priv; + queue_work(priv->workqueue, &priv->set_tim_work); + return 0; +} + +void cw1200_set_cts_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, set_cts_work); + + u8 erp_ie[3] = {WLAN_EID_ERP_INFO, 0x1, 0}; + struct wsm_update_ie update_ie = { + .what = WSM_UPDATE_IE_BEACON, + .count = 1, + .ies = erp_ie, + .length = 3, + }; + u32 erp_info; + __le32 use_cts_prot; + mutex_lock(&priv->conf_mutex); + erp_info = priv->erp_info; + mutex_unlock(&priv->conf_mutex); + use_cts_prot = + erp_info & WLAN_ERP_USE_PROTECTION ? 
+		__cpu_to_le32(1) : 0;
+
+	erp_ie[ERP_INFO_BYTE_OFFSET] = erp_info;
+
+	pr_debug("[STA] ERP information 0x%x\n", erp_info);
+
+	wsm_write_mib(priv, WSM_MIB_ID_NON_ERP_PROTECTION,
+		      &use_cts_prot, sizeof(use_cts_prot));
+	wsm_update_ie(priv, &update_ie);
+
+	return;
+}
+
+static int cw1200_set_btcoexinfo(struct cw1200_common *priv)
+{
+	struct wsm_override_internal_txrate arg;
+	int ret = 0;
+
+	if (priv->mode == NL80211_IFTYPE_STATION) {
+		/* Plumb PSPOLL and NULL template */
+		cw1200_upload_pspoll(priv);
+		cw1200_upload_null(priv);
+		cw1200_upload_qosnull(priv);
+	} else {
+		return 0;
+	}
+
+	memset(&arg, 0, sizeof(struct wsm_override_internal_txrate));
+
+	if (!priv->vif->p2p) {
+		/* STATION mode */
+		if (priv->bss_params.operational_rate_set & ~0xF) {
+			pr_debug("[STA] STA has ERP rates\n");
+			/* G or BG mode */
+			arg.internalTxRate = (__ffs(
+				priv->bss_params.operational_rate_set & ~0xF));
+		} else {
+			pr_debug("[STA] STA has non-ERP rates\n");
+			/* B only mode */
+			arg.internalTxRate = (__ffs(priv->association_mode.basic_rate_set));
+		}
+		arg.nonErpInternalTxRate = (__ffs(priv->association_mode.basic_rate_set));
+	} else {
+		/* P2P mode */
+		arg.internalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF));
+		arg.nonErpInternalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF));
+	}
+
+	pr_debug("[STA] BTCOEX_INFO MODE %d, internalTxRate : %x, nonErpInternalTxRate: %x\n",
+		 priv->mode,
+		 arg.internalTxRate,
+		 arg.nonErpInternalTxRate);
+
+	ret = wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE,
+			    &arg, sizeof(arg));
+
+	return ret;
+}
+
+void cw1200_bss_info_changed(struct ieee80211_hw *dev,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_bss_conf *info,
+			     u32 changed)
+{
+	struct cw1200_common *priv = dev->priv;
+	bool do_join = false;
+
+	mutex_lock(&priv->conf_mutex);
+
+	pr_debug("BSS CHANGED: %08x\n", changed);
+
+	/* TODO: BSS_CHANGED_QOS */
+	/* TODO: BSS_CHANGED_TXPOWER */
+
+	if (changed & BSS_CHANGED_ARP_FILTER) {
+		struct wsm_mib_arp_ipv4_filter filter = {0};
+		int i;
+
+		pr_debug("[STA] BSS_CHANGED_ARP_FILTER cnt: %d\n",
+			 info->arp_addr_cnt);
+
+		/* Currently only one IP address is supported by the
+		 * firmware. If more addresses are configured, ARP
+		 * filtering will be disabled.
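+		 * (WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES below reflects that
+		 * firmware limit.)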
+		 */
+		if (info->arp_addr_cnt > 0 &&
+		    info->arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) {
+			for (i = 0; i < info->arp_addr_cnt; i++) {
+				filter.ipv4addrs[i] = info->arp_addr_list[i];
+				pr_debug("[STA] addr[%d]: 0x%X\n",
+					 i, filter.ipv4addrs[i]);
+			}
+			filter.enable = __cpu_to_le32(1);
+		}
+
+		pr_debug("[STA] arp ip filter enable: %d\n",
+			 __le32_to_cpu(filter.enable));
+
+		wsm_set_arp_ipv4_filter(priv, &filter);
+	}
+
+	if (changed &
+	    (BSS_CHANGED_BEACON |
+	     BSS_CHANGED_AP_PROBE_RESP |
+	     BSS_CHANGED_BSSID |
+	     BSS_CHANGED_SSID |
+	     BSS_CHANGED_IBSS)) {
+		pr_debug("BSS_CHANGED_BEACON\n");
+		priv->beacon_int = info->beacon_int;
+		cw1200_update_beaconing(priv);
+		cw1200_upload_beacon(priv);
+	}
+
+	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+		pr_debug("BSS_CHANGED_BEACON_ENABLED (%d)\n", info->enable_beacon);
+
+		if (priv->enable_beacon != info->enable_beacon) {
+			cw1200_enable_beaconing(priv, info->enable_beacon);
+			priv->enable_beacon = info->enable_beacon;
+		}
+	}
+
+	if (changed & BSS_CHANGED_BEACON_INT) {
+		pr_debug("CHANGED_BEACON_INT\n");
+		if (info->ibss_joined)
+			do_join = true;
+		else if (priv->join_status == CW1200_JOIN_STATUS_AP)
+			cw1200_update_beaconing(priv);
+	}
+
+	/* assoc/disassoc, or maybe AID changed */
+	if (changed & BSS_CHANGED_ASSOC) {
+		wsm_lock_tx(priv);
+		priv->wep_default_key_id = -1;
+		wsm_unlock_tx(priv);
+	}
+
+	if (changed & BSS_CHANGED_BSSID) {
+		pr_debug("BSS_CHANGED_BSSID\n");
+		do_join = true;
+	}
+
+	if (changed &
+	    (BSS_CHANGED_ASSOC |
+	     BSS_CHANGED_BSSID |
+	     BSS_CHANGED_IBSS |
+	     BSS_CHANGED_BASIC_RATES |
+	     BSS_CHANGED_HT)) {
+		pr_debug("BSS_CHANGED_ASSOC\n");
+		if (info->assoc) {
+			if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) {
+				ieee80211_connection_loss(vif);
+				mutex_unlock(&priv->conf_mutex);
+				return;
+			} else if (priv->join_status == CW1200_JOIN_STATUS_PRE_STA) {
+				priv->join_status = CW1200_JOIN_STATUS_STA;
+			}
+		} else {
+			do_join = true;
+		}
+
+		if (info->assoc || info->ibss_joined) {
+			struct ieee80211_sta *sta = NULL;
+			u32 val = 0;
+
+			if (info->dtim_period)
+				priv->join_dtim_period = info->dtim_period;
+			priv->beacon_int = info->beacon_int;
+
+			rcu_read_lock();
+
+			if (info->bssid && !info->ibss_joined)
+				sta = ieee80211_find_sta(vif, info->bssid);
+			if (sta) {
+				priv->ht_info.ht_cap = sta->ht_cap;
+				priv->bss_params.operational_rate_set =
+					cw1200_rate_mask_to_wsm(priv,
+								sta->supp_rates[priv->channel->band]);
+				priv->ht_info.channel_type = cfg80211_get_chandef_type(&dev->conf.chandef);
+				priv->ht_info.operation_mode = info->ht_operation_mode;
+			} else {
+				memset(&priv->ht_info, 0,
+				       sizeof(priv->ht_info));
+				priv->bss_params.operational_rate_set = -1;
+			}
+			rcu_read_unlock();
+
+			/* Non Greenfield stations present */
+			if (priv->ht_info.operation_mode &
+			    IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
+				val |= WSM_NON_GREENFIELD_STA_PRESENT;
+
+			/* Set HT protection method */
+			val |= (priv->ht_info.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION) << 2;
+
+			/* TODO:
+			 * STBC_param.dual_cts
+			 * STBC_param.LSIG_TXOP_FILL
+			 */
+
+			{
+				/* Write the MIB in a proper __le32, rather
+				 * than storing a __le32 value into a u32.
+				 */
+				__le32 val32 = cpu_to_le32(val);
+				wsm_write_mib(priv, WSM_MIB_ID_SET_HT_PROTECTION,
+					      &val32, sizeof(val32));
+			}
+
+			priv->association_mode.greenfield =
+				cw1200_ht_greenfield(&priv->ht_info);
+			priv->association_mode.flags =
+				WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES |
+				WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE |
+				WSM_ASSOCIATION_MODE_USE_HT_MODE |
+				WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET |
+				WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING;
+			priv->association_mode.preamble =
+				info->use_short_preamble ?
+				WSM_JOIN_PREAMBLE_SHORT :
+				WSM_JOIN_PREAMBLE_LONG;
+			priv->association_mode.basic_rate_set = __cpu_to_le32(
+				cw1200_rate_mask_to_wsm(priv,
+							info->basic_rates));
+			priv->association_mode.mpdu_start_spacing =
+				cw1200_ht_ampdu_density(&priv->ht_info);
+
+			cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+			cancel_work_sync(&priv->unjoin_work);
+
+			priv->bss_params.beacon_lost_count = priv->cqm_beacon_loss_count;
+			priv->bss_params.aid = info->aid;
+
+			if (priv->join_dtim_period < 1)
+				priv->join_dtim_period = 1;
+
+			pr_debug("[STA] DTIM %d, interval: %d\n",
+				 priv->join_dtim_period, priv->beacon_int);
+			pr_debug("[STA] Preamble: %d, Greenfield: %d, Aid: %d, Rates: 0x%.8X, Basic: 0x%.8X\n",
+				 priv->association_mode.preamble,
+				 priv->association_mode.greenfield,
+				 priv->bss_params.aid,
+				 priv->bss_params.operational_rate_set,
+				 priv->association_mode.basic_rate_set);
+			wsm_set_association_mode(priv, &priv->association_mode);
+
+			if (!info->ibss_joined) {
+				wsm_keep_alive_period(priv, 30 /* sec */);
+				wsm_set_bss_params(priv, &priv->bss_params);
+				priv->setbssparams_done = true;
+				cw1200_set_beacon_wakeup_period_work(&priv->set_beacon_wakeup_period_work);
+				cw1200_set_pm(priv, &priv->powersave_mode);
+			}
+			if (priv->vif->p2p) {
+				pr_debug("[STA] Setting p2p powersave configuration.\n");
+				wsm_set_p2p_ps_modeinfo(priv,
+							&priv->p2p_ps_modeinfo);
+			}
+			if (priv->bt_present)
+				cw1200_set_btcoexinfo(priv);
+		} else {
+			memset(&priv->association_mode, 0,
+			       sizeof(priv->association_mode));
+			memset(&priv->bss_params, 0, sizeof(priv->bss_params));
+		}
+	}
+
+	/* ERP Protection */
+	if (changed & (BSS_CHANGED_ASSOC |
+		       BSS_CHANGED_ERP_CTS_PROT |
+		       BSS_CHANGED_ERP_PREAMBLE)) {
+		u32 prev_erp_info = priv->erp_info;
+		if (info->use_cts_prot)
+			priv->erp_info |= WLAN_ERP_USE_PROTECTION;
+		else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
+			priv->erp_info &= ~WLAN_ERP_USE_PROTECTION;
+
+		if (info->use_short_preamble)
+			priv->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
+		else
+			priv->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;
+
+		pr_debug("[STA] ERP Protection: %x\n", priv->erp_info);
+
+		if (prev_erp_info != priv->erp_info)
+			queue_work(priv->workqueue, &priv->set_cts_work);
+	}
+
+	/* ERP Slottime */
+	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT)) {
+		__le32 slot_time = info->use_short_slot ?
+			__cpu_to_le32(9) : __cpu_to_le32(20);
+		pr_debug("[STA] Slot time: %d us.\n",
+			 __le32_to_cpu(slot_time));
+		wsm_write_mib(priv, WSM_MIB_ID_DOT11_SLOT_TIME,
+			      &slot_time, sizeof(slot_time));
+	}
+
+	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
+		struct wsm_rcpi_rssi_threshold threshold = {
+			.rollingAverageCount = 8,
+		};
+		pr_debug("[CQM] RSSI threshold subscribe: %d +- %d\n",
+			 info->cqm_rssi_thold, info->cqm_rssi_hyst);
+		priv->cqm_rssi_thold = info->cqm_rssi_thold;
+		priv->cqm_rssi_hyst = info->cqm_rssi_hyst;
+
+		if (info->cqm_rssi_thold || info->cqm_rssi_hyst) {
+			/* RSSI subscription enabled */
+			/* TODO: This is not the correct way of setting the
+			 * threshold. Upper and lower must be set equal here
+			 * and adjusted in the callback. However, the current
+			 * implementation is much more reliable and stable.
+			 */
+
+			/* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+			 * RSSI = RCPI / 2 - 110
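+			 * e.g. an RCPI of 120 maps to 120 / 2 - 110 = -50 dBm.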
+			 */
+			if (priv->cqm_use_rssi) {
+				threshold.upperThreshold =
+					info->cqm_rssi_thold + info->cqm_rssi_hyst;
+				threshold.lowerThreshold =
+					info->cqm_rssi_thold;
+				threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI;
+			} else {
+				threshold.upperThreshold = (info->cqm_rssi_thold + info->cqm_rssi_hyst + 110) * 2;
+				threshold.lowerThreshold = (info->cqm_rssi_thold + 110) * 2;
+			}
+			threshold.rssiRcpiMode |= WSM_RCPI_RSSI_THRESHOLD_ENABLE;
+		} else {
+			/* There is a bug in FW (see the NOTE in
+			 * cw1200_setup_mac()). We have to enable a dummy
+			 * subscription to get correct RSSI values.
+			 */
+			threshold.rssiRcpiMode |=
+				WSM_RCPI_RSSI_THRESHOLD_ENABLE |
+				WSM_RCPI_RSSI_DONT_USE_UPPER |
+				WSM_RCPI_RSSI_DONT_USE_LOWER;
+			if (priv->cqm_use_rssi)
+				threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI;
+		}
+		wsm_set_rcpi_rssi_threshold(priv, &threshold);
+	}
+	mutex_unlock(&priv->conf_mutex);
+
+	if (do_join) {
+		wsm_lock_tx(priv);
+		cw1200_do_join(priv); /* Will unlock it for us */
+	}
+}
+
+void cw1200_multicast_start_work(struct work_struct *work)
+{
+	struct cw1200_common *priv =
+		container_of(work, struct cw1200_common, multicast_start_work);
+	long tmo = priv->join_dtim_period *
+			(priv->beacon_int + 20) * HZ / 1024;
+
+	cancel_work_sync(&priv->multicast_stop_work);
+
+	if (!priv->aid0_bit_set) {
+		wsm_lock_tx(priv);
+		cw1200_set_tim_impl(priv, true);
+		priv->aid0_bit_set = true;
+		mod_timer(&priv->mcast_timeout, jiffies + tmo);
+		wsm_unlock_tx(priv);
+	}
+}
+
+void cw1200_multicast_stop_work(struct work_struct *work)
+{
+	struct cw1200_common *priv =
+		container_of(work, struct cw1200_common, multicast_stop_work);
+
+	if (priv->aid0_bit_set) {
+		del_timer_sync(&priv->mcast_timeout);
+		wsm_lock_tx(priv);
+		priv->aid0_bit_set = false;
+		cw1200_set_tim_impl(priv, false);
+		wsm_unlock_tx(priv);
+	}
+}
+
+void cw1200_mcast_timeout(unsigned long arg)
+{
+	struct cw1200_common *priv =
+		(struct cw1200_common *)arg;
+
+	wiphy_warn(priv->hw->wiphy,
+		   "Multicast delivery timeout.\n");
+	spin_lock_bh(&priv->ps_state_lock);
+	priv->tx_multicast = priv->aid0_bit_set &&
+			     priv->buffered_multicasts;
+	if (priv->tx_multicast)
+		cw1200_bh_wakeup(priv);
+	spin_unlock_bh(&priv->ps_state_lock);
+}
+
+int cw1200_ampdu_action(struct ieee80211_hw *hw,
+			struct ieee80211_vif *vif,
+			enum ieee80211_ampdu_mlme_action action,
+			struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+			u8 buf_size)
+{
+	/* Aggregation is implemented fully in firmware,
+	 * including block ack negotiation. Do not allow
+	 * mac80211 stack to do anything: it interferes with
+	 * the firmware.
+	 */
+
+	/* Note that we still need this function stubbed. */
+	return -ENOTSUPP;
+}
+
+/* ******************************************************************** */
+/* WSM callback */
+void cw1200_suspend_resume(struct cw1200_common *priv,
+			   struct wsm_suspend_resume *arg)
+{
+	pr_debug("[AP] %s: %s\n",
+		 arg->stop ? "stop" : "start",
+		 arg->multicast ? "broadcast" : "unicast");
+
+	if (arg->multicast) {
+		bool cancel_tmo = false;
+		spin_lock_bh(&priv->ps_state_lock);
+		if (arg->stop) {
+			priv->tx_multicast = false;
+		} else {
+			/* Firmware sends this indication every DTIM if there
+			 * is a connected STA in powersave mode. There is no
+			 * reason to suspend; the following wakeup would
+			 * consume much more power than could be saved.
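+			 * Instead, stay awake for roughly one DTIM interval
+			 * so the buffered multicast frames can be delivered
+			 * in one go.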
+ */ + cw1200_pm_stay_awake(&priv->pm_state, + priv->join_dtim_period * + (priv->beacon_int + 20) * HZ / 1024); + priv->tx_multicast = (priv->aid0_bit_set && + priv->buffered_multicasts); + if (priv->tx_multicast) { + cancel_tmo = true; + cw1200_bh_wakeup(priv); + } + } + spin_unlock_bh(&priv->ps_state_lock); + if (cancel_tmo) + del_timer_sync(&priv->mcast_timeout); + } else { + spin_lock_bh(&priv->ps_state_lock); + cw1200_ps_notify(priv, arg->link_id, arg->stop); + spin_unlock_bh(&priv->ps_state_lock); + if (!arg->stop) + cw1200_bh_wakeup(priv); + } + return; +} + +/* ******************************************************************** */ +/* AP privates */ + +static int cw1200_upload_beacon(struct cw1200_common *priv) +{ + int ret = 0; + struct ieee80211_mgmt *mgmt; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_BEACON, + }; + + u16 tim_offset; + u16 tim_len; + + if (priv->mode == NL80211_IFTYPE_STATION || + priv->mode == NL80211_IFTYPE_MONITOR || + priv->mode == NL80211_IFTYPE_UNSPECIFIED) + goto done; + + if (priv->vif->p2p) + frame.rate = WSM_TRANSMIT_RATE_6; + + frame.skb = ieee80211_beacon_get_tim(priv->hw, priv->vif, + &tim_offset, &tim_len); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + if (ret) + goto done; + + /* TODO: Distill probe resp; remove TIM + * and any other beacon-specific IEs + */ + mgmt = (void *)frame.skb->data; + mgmt->frame_control = + __cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + + frame.frame_type = WSM_FRAME_TYPE_PROBE_RESPONSE; + if (priv->vif->p2p) { + ret = wsm_set_probe_responder(priv, true); + } else { + ret = wsm_set_template_frame(priv, &frame); + wsm_set_probe_responder(priv, false); + } + +done: + dev_kfree_skb(frame.skb); + + return ret; +} + +static int cw1200_upload_pspoll(struct cw1200_common *priv) +{ + int ret = 0; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_PS_POLL, + .rate = 0xFF, + }; + + + frame.skb = ieee80211_pspoll_get(priv->hw, priv->vif); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + dev_kfree_skb(frame.skb); + + return ret; +} + +static int cw1200_upload_null(struct cw1200_common *priv) +{ + int ret = 0; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_NULL, + .rate = 0xFF, + }; + + frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + dev_kfree_skb(frame.skb); + + return ret; +} + +static int cw1200_upload_qosnull(struct cw1200_common *priv) +{ + int ret = 0; + /* TODO: This needs to be implemented + + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_QOS_NULL, + .rate = 0xFF, + }; + + frame.skb = ieee80211_qosnullfunc_get(priv->hw, priv->vif); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + dev_kfree_skb(frame.skb); + + */ + return ret; +} + +static int cw1200_enable_beaconing(struct cw1200_common *priv, + bool enable) +{ + struct wsm_beacon_transmit transmit = { + .enable_beaconing = enable, + }; + + return wsm_beacon_transmit(priv, &transmit); +} + +static int cw1200_start_ap(struct cw1200_common *priv) +{ + int ret; + struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; + struct wsm_start start = { + .mode = priv->vif->p2p ? + WSM_START_MODE_P2P_GO : WSM_START_MODE_AP, + .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? 
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G, + .channel_number = priv->channel->hw_value, + .beacon_interval = conf->beacon_int, + .dtim_period = conf->dtim_period, + .preamble = conf->use_short_preamble ? + WSM_JOIN_PREAMBLE_SHORT : + WSM_JOIN_PREAMBLE_LONG, + .probe_delay = 100, + .basic_rate_set = cw1200_rate_mask_to_wsm(priv, + conf->basic_rates), + }; + struct wsm_operational_mode mode = { + .power_mode = cw1200_power_mode, + .disable_more_flag_usage = true, + }; + + memset(start.ssid, 0, sizeof(start.ssid)); + if (!conf->hidden_ssid) { + start.ssid_len = conf->ssid_len; + memcpy(start.ssid, conf->ssid, start.ssid_len); + } + + priv->beacon_int = conf->beacon_int; + priv->join_dtim_period = conf->dtim_period; + + memset(&priv->link_id_db, 0, sizeof(priv->link_id_db)); + + pr_debug("[AP] ch: %d(%d), bcn: %d(%d), brt: 0x%.8X, ssid: %.*s.\n", + start.channel_number, start.band, + start.beacon_interval, start.dtim_period, + start.basic_rate_set, + start.ssid_len, start.ssid); + ret = wsm_start(priv, &start); + if (!ret) + ret = cw1200_upload_keys(priv); + if (!ret && priv->vif->p2p) { + pr_debug("[AP] Setting p2p powersave configuration.\n"); + wsm_set_p2p_ps_modeinfo(priv, &priv->p2p_ps_modeinfo); + } + if (!ret) { + wsm_set_block_ack_policy(priv, 0, 0); + priv->join_status = CW1200_JOIN_STATUS_AP; + cw1200_update_filtering(priv); + } + wsm_set_operational_mode(priv, &mode); + return ret; +} + +static int cw1200_update_beaconing(struct cw1200_common *priv) +{ + struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; + struct wsm_reset reset = { + .link_id = 0, + .reset_statistics = true, + }; + + if (priv->mode == NL80211_IFTYPE_AP) { + /* TODO: check if changed channel, band */ + if (priv->join_status != CW1200_JOIN_STATUS_AP || + priv->beacon_int != conf->beacon_int) { + pr_debug("ap restarting\n"); + wsm_lock_tx(priv); + if (priv->join_status != CW1200_JOIN_STATUS_PASSIVE) + wsm_reset(priv, &reset); + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + cw1200_start_ap(priv); + wsm_unlock_tx(priv); + } else + pr_debug("ap started join_status: %d\n", + priv->join_status); + } + return 0; +} diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h new file mode 100644 index 000000000000..35babb62cc6a --- /dev/null +++ b/drivers/net/wireless/cw1200/sta.h @@ -0,0 +1,123 @@ +/* + * Mac80211 STA interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef STA_H_INCLUDED +#define STA_H_INCLUDED + +/* ******************************************************************** */ +/* mac80211 API */ + +int cw1200_start(struct ieee80211_hw *dev); +void cw1200_stop(struct ieee80211_hw *dev); +int cw1200_add_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif); +void cw1200_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif); +int cw1200_change_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, + bool p2p); +int cw1200_config(struct ieee80211_hw *dev, u32 changed); +void cw1200_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast); +int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +int cw1200_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats); +int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); + +int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value); + +void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop); + +u64 cw1200_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list); + +int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg); + +/* ******************************************************************** */ +/* WSM callbacks */ + +void cw1200_join_complete_cb(struct cw1200_common *priv, + struct wsm_join_complete *arg); + +/* ******************************************************************** */ +/* WSM events */ + +void cw1200_free_event_queue(struct cw1200_common *priv); +void cw1200_event_handler(struct work_struct *work); +void cw1200_bss_loss_work(struct work_struct *work); +void cw1200_bss_params_work(struct work_struct *work); +void cw1200_keep_alive_work(struct work_struct *work); +void cw1200_tx_failure_work(struct work_struct *work); + +void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, int init, int good, + int bad); +static inline void cw1200_cqm_bssloss_sm(struct cw1200_common *priv, + int init, int good, int bad) +{ + spin_lock(&priv->bss_loss_lock); + __cw1200_cqm_bssloss_sm(priv, init, good, bad); + spin_unlock(&priv->bss_loss_lock); +} + +/* ******************************************************************** */ +/* Internal API */ + +int cw1200_setup_mac(struct cw1200_common *priv); +void cw1200_join_timeout(struct work_struct *work); +void cw1200_unjoin_work(struct work_struct *work); +void cw1200_join_complete_work(struct work_struct *work); +void cw1200_wep_key_work(struct work_struct *work); +void cw1200_update_listening(struct cw1200_common *priv, bool enabled); +void cw1200_update_filtering(struct cw1200_common *priv); +void cw1200_update_filtering_work(struct work_struct *work); +void cw1200_set_beacon_wakeup_period_work(struct work_struct *work); +int cw1200_enable_listening(struct cw1200_common *priv); +int cw1200_disable_listening(struct cw1200_common *priv); +int cw1200_set_uapsd_param(struct cw1200_common *priv, + const struct wsm_edca_params *arg); +void cw1200_ba_work(struct work_struct *work); +void cw1200_ba_timer(unsigned long arg); + +/* AP stuffs */ +int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta, + bool set); +int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int cw1200_sta_remove(struct 
ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + struct ieee80211_sta *sta); +void cw1200_bss_info_changed(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed); +int cw1200_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size); + +void cw1200_suspend_resume(struct cw1200_common *priv, + struct wsm_suspend_resume *arg); +void cw1200_set_tim_work(struct work_struct *work); +void cw1200_set_cts_work(struct work_struct *work); +void cw1200_multicast_start_work(struct work_struct *work); +void cw1200_multicast_stop_work(struct work_struct *work); +void cw1200_mcast_timeout(unsigned long arg); + +#endif diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c new file mode 100644 index 000000000000..44ca10cb0d39 --- /dev/null +++ b/drivers/net/wireless/cw1200/txrx.c @@ -0,0 +1,1474 @@ +/* + * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> + +#include "cw1200.h" +#include "wsm.h" +#include "bh.h" +#include "sta.h" +#include "debug.h" + +#define CW1200_INVALID_RATE_ID (0xFF) + +static int cw1200_handle_action_rx(struct cw1200_common *priv, + struct sk_buff *skb); +static const struct ieee80211_rate * +cw1200_get_tx_rate(const struct cw1200_common *priv, + const struct ieee80211_tx_rate *rate); + +/* ******************************************************************** */ +/* TX queue lock / unlock */ + +static inline void cw1200_tx_queues_lock(struct cw1200_common *priv) +{ + int i; + for (i = 0; i < 4; ++i) + cw1200_queue_lock(&priv->tx_queue[i]); +} + +static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv) +{ + int i; + for (i = 0; i < 4; ++i) + cw1200_queue_unlock(&priv->tx_queue[i]); +} + +/* ******************************************************************** */ +/* TX policy cache implementation */ + +static void tx_policy_dump(struct tx_policy *policy) +{ + pr_debug("[TX policy] %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n", + policy->raw[0] & 0x0F, policy->raw[0] >> 4, + policy->raw[1] & 0x0F, policy->raw[1] >> 4, + policy->raw[2] & 0x0F, policy->raw[2] >> 4, + policy->raw[3] & 0x0F, policy->raw[3] >> 4, + policy->raw[4] & 0x0F, policy->raw[4] >> 4, + policy->raw[5] & 0x0F, policy->raw[5] >> 4, + policy->raw[6] & 0x0F, policy->raw[6] >> 4, + policy->raw[7] & 0x0F, policy->raw[7] >> 4, + policy->raw[8] & 0x0F, policy->raw[8] >> 4, + policy->raw[9] & 0x0F, policy->raw[9] >> 4, + policy->raw[10] & 0x0F, policy->raw[10] >> 4, + policy->raw[11] & 0x0F, policy->raw[11] >> 4, + policy->defined); +} + +static void tx_policy_build(const struct cw1200_common *priv, + /* [out] */ struct tx_policy *policy, + struct ieee80211_tx_rate *rates, size_t count) +{ + int i, j; + unsigned limit = priv->short_frame_max_tx_count; + unsigned total = 0; + BUG_ON(rates[0].idx < 0); + memset(policy, 0, 
sizeof(*policy)); + + /* Sort rates in descending order. */ + for (i = 1; i < count; ++i) { + if (rates[i].idx < 0) { + count = i; + break; + } + if (rates[i].idx > rates[i - 1].idx) { + struct ieee80211_tx_rate tmp = rates[i - 1]; + rates[i - 1] = rates[i]; + rates[i] = tmp; + } + } + + /* Eliminate duplicates. */ + total = rates[0].count; + for (i = 0, j = 1; j < count; ++j) { + if (rates[j].idx == rates[i].idx) { + rates[i].count += rates[j].count; + } else if (rates[j].idx > rates[i].idx) { + break; + } else { + ++i; + if (i != j) + rates[i] = rates[j]; + } + total += rates[j].count; + } + count = i + 1; + + /* Re-fill the policy, trying to keep every requested rate while + * respecting the global max tx retransmission count. + */ + if (limit < count) + limit = count; + if (total > limit) { + for (i = 0; i < count; ++i) { + int left = count - i - 1; + if (rates[i].count > limit - left) + rates[i].count = limit - left; + limit -= rates[i].count; + } + } + + /* HACK!!! Device has problems (at least) switching from + * 54Mbps CTS to 1Mbps. This switch takes an enormous amount + * of time (100-200 ms), leading to a substantial throughput drop. + * As a workaround, additional g-rates are injected into the + * policy. + */ + if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) && + rates[0].idx > 4 && rates[0].count > 2 && + rates[1].idx < 2) { + int mid_rate = (rates[0].idx + 4) >> 1; + + /* Decrease number of retries for the initial rate */ + rates[0].count -= 2; + + if (mid_rate != 4) { + /* Keep fallback rate at 1Mbps. */ + rates[3] = rates[1]; + + /* Inject 1 transmission on lowest g-rate */ + rates[2].idx = 4; + rates[2].count = 1; + rates[2].flags = rates[1].flags; + + /* Inject 1 transmission on mid-rate */ + rates[1].idx = mid_rate; + rates[1].count = 1; + + /* Fallback to 1 Mbps is a really bad thing, + * so let's try to increase the probability of + * successful transmission on the lowest g rate + * even more + */ + if (rates[0].count >= 3) { + --rates[0].count; + ++rates[2].count; + } + + /* Adjust the number of rates defined */ + count += 2; + } else { + /* Keep fallback rate at 1Mbps. */ + rates[2] = rates[1]; + + /* Inject 2 transmissions on lowest g-rate */ + rates[1].idx = 4; + rates[1].count = 2; + + /* Adjust the number of rates defined */ + count += 1; + } + } + + policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1; + + for (i = 0; i < count; ++i) { + register unsigned rateid, off, shift, retries; + + rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value; + off = rateid >> 3; /* eq. rateid / 8 */ + shift = (rateid & 0x07) << 2; /* eq. 
(rateid % 8) * 4 */ + + retries = rates[i].count; + if (retries > 0x0F) { + rates[i].count = 0x0F; + retries = 0x0F; + } + policy->tbl[off] |= __cpu_to_le32(retries << shift); + policy->retry_count += retries; + } + + pr_debug("[TX policy] Policy (%zu): %d:%d, %d:%d, %d:%d, %d:%d\n", + count, + rates[0].idx, rates[0].count, + rates[1].idx, rates[1].count, + rates[2].idx, rates[2].count, + rates[3].idx, rates[3].count); +} + +static inline bool tx_policy_is_equal(const struct tx_policy *wanted, + const struct tx_policy *cached) +{ + size_t count = wanted->defined >> 1; + if (wanted->defined > cached->defined) + return false; + if (count) { + if (memcmp(wanted->raw, cached->raw, count)) + return false; + } + if (wanted->defined & 1) { + if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F)) + return false; + } + return true; +} + +static int tx_policy_find(struct tx_policy_cache *cache, + const struct tx_policy *wanted) +{ + /* O(n) complexity. Not so good, but there are only 8 entries in + * the cache. + * The LRU ordering also helps to reduce the search time. + */ + struct tx_policy_cache_entry *it; + /* First search for the policy in the "used" list */ + list_for_each_entry(it, &cache->used, link) { + if (tx_policy_is_equal(wanted, &it->policy)) + return it - cache->cache; + } + /* Then in the "free" list */ + list_for_each_entry(it, &cache->free, link) { + if (tx_policy_is_equal(wanted, &it->policy)) + return it - cache->cache; + } + return -1; +} + +static inline void tx_policy_use(struct tx_policy_cache *cache, + struct tx_policy_cache_entry *entry) +{ + ++entry->policy.usage_count; + list_move(&entry->link, &cache->used); +} + +static inline int tx_policy_release(struct tx_policy_cache *cache, + struct tx_policy_cache_entry *entry) +{ + int ret = --entry->policy.usage_count; + if (!ret) + list_move(&entry->link, &cache->free); + return ret; +} + +void tx_policy_clean(struct cw1200_common *priv) +{ + int idx, locked; + struct tx_policy_cache *cache = &priv->tx_policy_cache; + struct tx_policy_cache_entry *entry; + + cw1200_tx_queues_lock(priv); + spin_lock_bh(&cache->lock); + locked = list_empty(&cache->free); + + for (idx = 0; idx < TX_POLICY_CACHE_SIZE; idx++) { + entry = &cache->cache[idx]; + /* Policy usage count should be 0 at this time, as all queues + * should be empty + */ + if (WARN_ON(entry->policy.usage_count)) { + entry->policy.usage_count = 0; + list_move(&entry->link, &cache->free); + } + memset(&entry->policy, 0, sizeof(entry->policy)); + } + if (locked) + cw1200_tx_queues_unlock(priv); + + spin_unlock_bh(&cache->lock); + cw1200_tx_queues_unlock(priv); +} + +/* ******************************************************************** */ +/* External TX policy cache API */ + +void tx_policy_init(struct cw1200_common *priv) +{ + struct tx_policy_cache *cache = &priv->tx_policy_cache; + int i; + + memset(cache, 0, sizeof(*cache)); + + spin_lock_init(&cache->lock); + INIT_LIST_HEAD(&cache->used); + INIT_LIST_HEAD(&cache->free); + + for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) + list_add(&cache->cache[i].link, &cache->free); +} + +static int tx_policy_get(struct cw1200_common *priv, + struct ieee80211_tx_rate *rates, + size_t count, bool *renew) +{ + int idx; + struct tx_policy_cache *cache = &priv->tx_policy_cache; + struct tx_policy wanted; + + tx_policy_build(priv, &wanted, rates, count); + + spin_lock_bh(&cache->lock); + if (WARN_ON_ONCE(list_empty(&cache->free))) { + spin_unlock_bh(&cache->lock); + return CW1200_INVALID_RATE_ID; + } + idx = tx_policy_find(cache, &wanted); + if (idx >= 0) { 
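+ /* Cache hit: tx_policy_use() below bumps the usage count and + * moves the entry up the LRU "used" list. */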
pr_debug("[TX policy] Used TX policy: %d\n", idx); + *renew = false; + } else { + struct tx_policy_cache_entry *entry; + *renew = true; + /* If policy is not found create a new one + * using the oldest entry in "free" list + */ + entry = list_entry(cache->free.prev, + struct tx_policy_cache_entry, link); + entry->policy = wanted; + idx = entry - cache->cache; + pr_debug("[TX policy] New TX policy: %d\n", idx); + tx_policy_dump(&entry->policy); + } + tx_policy_use(cache, &cache->cache[idx]); + if (list_empty(&cache->free)) { + /* Lock TX queues. */ + cw1200_tx_queues_lock(priv); + } + spin_unlock_bh(&cache->lock); + return idx; +} + +static void tx_policy_put(struct cw1200_common *priv, int idx) +{ + int usage, locked; + struct tx_policy_cache *cache = &priv->tx_policy_cache; + + spin_lock_bh(&cache->lock); + locked = list_empty(&cache->free); + usage = tx_policy_release(cache, &cache->cache[idx]); + if (locked && !usage) { + /* Unlock TX queues. */ + cw1200_tx_queues_unlock(priv); + } + spin_unlock_bh(&cache->lock); +} + +static int tx_policy_upload(struct cw1200_common *priv) +{ + struct tx_policy_cache *cache = &priv->tx_policy_cache; + int i; + struct wsm_set_tx_rate_retry_policy arg = { + .num = 0, + }; + spin_lock_bh(&cache->lock); + + /* Upload only modified entries. */ + for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) { + struct tx_policy *src = &cache->cache[i].policy; + if (src->retry_count && !src->uploaded) { + struct wsm_tx_rate_retry_policy *dst = + &arg.tbl[arg.num]; + dst->index = i; + dst->short_retries = priv->short_frame_max_tx_count; + dst->long_retries = priv->long_frame_max_tx_count; + + dst->flags = WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED | + WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT; + memcpy(dst->rate_count_indices, src->tbl, + sizeof(dst->rate_count_indices)); + src->uploaded = 1; + ++arg.num; + } + } + spin_unlock_bh(&cache->lock); + cw1200_debug_tx_cache_miss(priv); + pr_debug("[TX policy] Upload %d policies\n", arg.num); + return wsm_set_tx_rate_retry_policy(priv, &arg); +} + +void tx_policy_upload_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, tx_policy_upload_work); + + pr_debug("[TX] TX policy upload.\n"); + tx_policy_upload(priv); + + wsm_unlock_tx(priv); + cw1200_tx_queues_unlock(priv); +} + +/* ******************************************************************** */ +/* cw1200 TX implementation */ + +struct cw1200_txinfo { + struct sk_buff *skb; + unsigned queue; + struct ieee80211_tx_info *tx_info; + const struct ieee80211_rate *rate; + struct ieee80211_hdr *hdr; + size_t hdrlen; + const u8 *da; + struct cw1200_sta_priv *sta_priv; + struct ieee80211_sta *sta; + struct cw1200_txpriv txpriv; +}; + +u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates) +{ + u32 ret = 0; + int i; + for (i = 0; i < 32; ++i) { + if (rates & BIT(i)) + ret |= BIT(priv->rates[i].hw_value); + } + return ret; +} + +static const struct ieee80211_rate * +cw1200_get_tx_rate(const struct cw1200_common *priv, + const struct ieee80211_tx_rate *rate) +{ + if (rate->idx < 0) + return NULL; + if (rate->flags & IEEE80211_TX_RC_MCS) + return &priv->mcs_rates[rate->idx]; + return &priv->hw->wiphy->bands[priv->channel->band]-> + bitrates[rate->idx]; +} + +static int +cw1200_tx_h_calc_link_ids(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (t->sta && t->sta_priv->link_id) + t->txpriv.raw_link_id = + t->txpriv.link_id = + t->sta_priv->link_id; + else if (priv->mode != NL80211_IFTYPE_AP) + 
t->txpriv.raw_link_id = + t->txpriv.link_id = 0; + else if (is_multicast_ether_addr(t->da)) { + if (priv->enable_beacon) { + t->txpriv.raw_link_id = 0; + t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM; + } else { + t->txpriv.raw_link_id = 0; + t->txpriv.link_id = 0; + } + } else { + t->txpriv.link_id = cw1200_find_link_id(priv, t->da); + if (!t->txpriv.link_id) + t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da); + if (!t->txpriv.link_id) { + wiphy_err(priv->hw->wiphy, + "No more link IDs available.\n"); + return -ENOENT; + } + t->txpriv.raw_link_id = t->txpriv.link_id; + } + if (t->txpriv.raw_link_id) + priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp = + jiffies; + if (t->sta && (t->sta->uapsd_queues & BIT(t->queue))) + t->txpriv.link_id = CW1200_LINK_ID_UAPSD; + return 0; +} + +static void +cw1200_tx_h_pm(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (ieee80211_is_auth(t->hdr->frame_control)) { + u32 mask = ~BIT(t->txpriv.raw_link_id); + spin_lock_bh(&priv->ps_state_lock); + priv->sta_asleep_mask &= mask; + priv->pspoll_mask &= mask; + spin_unlock_bh(&priv->ps_state_lock); + } +} + +static void +cw1200_tx_h_calc_tid(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (ieee80211_is_data_qos(t->hdr->frame_control)) { + u8 *qos = ieee80211_get_qos_ctl(t->hdr); + t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK; + } else if (ieee80211_is_data(t->hdr->frame_control)) { + t->txpriv.tid = 0; + } +} + +static int +cw1200_tx_h_crypt(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (!t->tx_info->control.hw_key || + !ieee80211_has_protected(t->hdr->frame_control)) + return 0; + + t->hdrlen += t->tx_info->control.hw_key->iv_len; + skb_put(t->skb, t->tx_info->control.hw_key->icv_len); + + if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) + skb_put(t->skb, 8); /* MIC space */ + + return 0; +} + +static int +cw1200_tx_h_align(struct cw1200_common *priv, + struct cw1200_txinfo *t, + u8 *flags) +{ + size_t offset = (size_t)t->skb->data & 3; + + if (!offset) + return 0; + + if (offset & 1) { + wiphy_err(priv->hw->wiphy, + "Bug: attempt to transmit a frame with wrong alignment: %zu\n", + offset); + return -EINVAL; + } + + if (skb_headroom(t->skb) < offset) { + wiphy_err(priv->hw->wiphy, + "Bug: no space allocated for DMA alignment. headroom: %d\n", + skb_headroom(t->skb)); + return -ENOMEM; + } + skb_push(t->skb, offset); + t->hdrlen += offset; + t->txpriv.offset += offset; + *flags |= WSM_TX_2BYTES_SHIFT; + cw1200_debug_tx_align(priv); + return 0; +} + +static int +cw1200_tx_h_action(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + struct ieee80211_mgmt *mgmt = + (struct ieee80211_mgmt *)t->hdr; + if (ieee80211_is_action(t->hdr->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_BACK) + return 1; + else + return 0; +} + +/* Add WSM header */ +static struct wsm_tx * +cw1200_tx_h_wsm(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + struct wsm_tx *wsm; + + if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) { + wiphy_err(priv->hw->wiphy, + "Bug: no space allocated for WSM header. 
headroom: %d\n", + skb_headroom(t->skb)); + return NULL; + } + + wsm = (struct wsm_tx *)skb_push(t->skb, sizeof(struct wsm_tx)); + t->txpriv.offset += sizeof(struct wsm_tx); + memset(wsm, 0, sizeof(*wsm)); + wsm->hdr.len = __cpu_to_le16(t->skb->len); + wsm->hdr.id = __cpu_to_le16(0x0004); + wsm->queue_id = wsm_queue_id_to_wsm(t->queue); + return wsm; +} + +/* BT Coex specific handling */ +static void +cw1200_tx_h_bt(struct cw1200_common *priv, + struct cw1200_txinfo *t, + struct wsm_tx *wsm) +{ + u8 priority = 0; + + if (!priv->bt_present) + return; + + if (ieee80211_is_nullfunc(t->hdr->frame_control)) { + priority = WSM_EPTA_PRIORITY_MGT; + } else if (ieee80211_is_data(t->hdr->frame_control)) { + /* Skip LLC SNAP header (+6) */ + u8 *payload = &t->skb->data[t->hdrlen]; + u16 *ethertype = (u16 *)&payload[6]; + if (*ethertype == __be16_to_cpu(ETH_P_PAE)) + priority = WSM_EPTA_PRIORITY_EAPOL; + } else if (ieee80211_is_assoc_req(t->hdr->frame_control) || + ieee80211_is_reassoc_req(t->hdr->frame_control)) { + struct ieee80211_mgmt *mgt_frame = + (struct ieee80211_mgmt *)t->hdr; + + if (mgt_frame->u.assoc_req.listen_interval < + priv->listen_interval) { + pr_debug("Modified Listen Interval to %d from %d\n", + priv->listen_interval, + mgt_frame->u.assoc_req.listen_interval); + /* Replace the listen interval with the one + * read from the SDD + */ + mgt_frame->u.assoc_req.listen_interval = + priv->listen_interval; + } + } + + if (!priority) { + if (ieee80211_is_action(t->hdr->frame_control)) + priority = WSM_EPTA_PRIORITY_ACTION; + else if (ieee80211_is_mgmt(t->hdr->frame_control)) + priority = WSM_EPTA_PRIORITY_MGT; + else if (wsm->queue_id == WSM_QUEUE_VOICE) + priority = WSM_EPTA_PRIORITY_VOICE; + else if (wsm->queue_id == WSM_QUEUE_VIDEO) + priority = WSM_EPTA_PRIORITY_VIDEO; + else + priority = WSM_EPTA_PRIORITY_DATA; + } + + pr_debug("[TX] EPTA priority %d.\n", priority); + + wsm->flags |= priority << 1; +} + +static int +cw1200_tx_h_rate_policy(struct cw1200_common *priv, + struct cw1200_txinfo *t, + struct wsm_tx *wsm) +{ + bool tx_policy_renew = false; + + t->txpriv.rate_id = tx_policy_get(priv, + t->tx_info->control.rates, IEEE80211_TX_MAX_RATES, + &tx_policy_renew); + if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID) + return -EFAULT; + + wsm->flags |= t->txpriv.rate_id << 4; + + t->rate = cw1200_get_tx_rate(priv, + &t->tx_info->control.rates[0]); + wsm->max_tx_rate = t->rate->hw_value; + if (t->rate->flags & IEEE80211_TX_RC_MCS) { + if (cw1200_ht_greenfield(&priv->ht_info)) + wsm->ht_tx_parameters |= + __cpu_to_le32(WSM_HT_TX_GREENFIELD); + else + wsm->ht_tx_parameters |= + __cpu_to_le32(WSM_HT_TX_MIXED); + } + + if (tx_policy_renew) { + pr_debug("[TX] TX policy renew.\n"); + /* It's not optimal to stop the TX queues every now and then. + * Better to reimplement task scheduling with + * a counter. TODO. 
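+ * (One possible approach: track pending policy uploads with a + * counter and let the bh keep draining frames meanwhile; just + * a sketch, not what the code does today.)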
+ */ + wsm_lock_tx_async(priv); + cw1200_tx_queues_lock(priv); + if (queue_work(priv->workqueue, + &priv->tx_policy_upload_work) <= 0) { + cw1200_tx_queues_unlock(priv); + wsm_unlock_tx(priv); + } + } + return 0; +} + +static bool +cw1200_tx_h_pm_state(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + int was_buffered = 1; + + if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM && + !priv->buffered_multicasts) { + priv->buffered_multicasts = true; + if (priv->sta_asleep_mask) + queue_work(priv->workqueue, + &priv->multicast_start_work); + } + + if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID) + was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++; + + return !was_buffered; +} + +/* ******************************************************************** */ + +void cw1200_tx(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct cw1200_common *priv = dev->priv; + struct cw1200_txinfo t = { + .skb = skb, + .queue = skb_get_queue_mapping(skb), + .tx_info = IEEE80211_SKB_CB(skb), + .hdr = (struct ieee80211_hdr *)skb->data, + .txpriv.tid = CW1200_MAX_TID, + .txpriv.rate_id = CW1200_INVALID_RATE_ID, + }; + struct ieee80211_sta *sta; + struct wsm_tx *wsm; + bool tid_update = 0; + u8 flags = 0; + int ret; + + if (priv->bh_error) + goto drop; + + t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control); + t.da = ieee80211_get_DA(t.hdr); + if (control) { + t.sta = control->sta; + t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv; + } + + if (WARN_ON(t.queue >= 4)) + goto drop; + + ret = cw1200_tx_h_calc_link_ids(priv, &t); + if (ret) + goto drop; + + pr_debug("[TX] TX %d bytes (queue: %d, link_id: %d (%d)).\n", + skb->len, t.queue, t.txpriv.link_id, + t.txpriv.raw_link_id); + + cw1200_tx_h_pm(priv, &t); + cw1200_tx_h_calc_tid(priv, &t); + ret = cw1200_tx_h_crypt(priv, &t); + if (ret) + goto drop; + ret = cw1200_tx_h_align(priv, &t, &flags); + if (ret) + goto drop; + ret = cw1200_tx_h_action(priv, &t); + if (ret) + goto drop; + wsm = cw1200_tx_h_wsm(priv, &t); + if (!wsm) { + ret = -ENOMEM; + goto drop; + } + wsm->flags |= flags; + cw1200_tx_h_bt(priv, &t, wsm); + ret = cw1200_tx_h_rate_policy(priv, &t, wsm); + if (ret) + goto drop; + + rcu_read_lock(); + sta = rcu_dereference(t.sta); + + spin_lock_bh(&priv->ps_state_lock); + { + tid_update = cw1200_tx_h_pm_state(priv, &t); + BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue], + t.skb, &t.txpriv)); + } + spin_unlock_bh(&priv->ps_state_lock); + + if (tid_update && sta) + ieee80211_sta_set_buffered(sta, t.txpriv.tid, true); + + rcu_read_unlock(); + + cw1200_bh_wakeup(priv); + + return; + +drop: + cw1200_skb_dtor(priv, skb, &t.txpriv); + return; +} + +/* ******************************************************************** */ + +static int cw1200_handle_action_rx(struct cw1200_common *priv, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + /* Filter block ACK negotiation: fully controlled by firmware */ + if (mgmt->u.action.category == WLAN_CATEGORY_BACK) + return 1; + + return 0; +} + +static int cw1200_handle_pspoll(struct cw1200_common *priv, + struct sk_buff *skb) +{ + struct ieee80211_sta *sta; + struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data; + int link_id = 0; + u32 pspoll_mask = 0; + int drop = 1; + int i; + + if (priv->join_status != CW1200_JOIN_STATUS_AP) + goto done; + if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN)) + goto done; + + rcu_read_lock(); + sta = ieee80211_find_sta(priv->vif, 
pspoll->ta); + if (sta) { + struct cw1200_sta_priv *sta_priv; + sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv; + link_id = sta_priv->link_id; + pspoll_mask = BIT(sta_priv->link_id); + } + rcu_read_unlock(); + if (!link_id) + goto done; + + priv->pspoll_mask |= pspoll_mask; + drop = 0; + + /* Do not report PS-Polls if data for the given link ID is already queued. */ + for (i = 0; i < 4; ++i) { + if (cw1200_queue_get_num_queued(&priv->tx_queue[i], + pspoll_mask)) { + cw1200_bh_wakeup(priv); + drop = 1; + break; + } + } + pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd"); +done: + return drop; +} + +/* ******************************************************************** */ + +void cw1200_tx_confirm_cb(struct cw1200_common *priv, + int link_id, + struct wsm_tx_confirm *arg) +{ + u8 queue_id = cw1200_queue_get_queue_id(arg->packet_id); + struct cw1200_queue *queue = &priv->tx_queue[queue_id]; + struct sk_buff *skb; + const struct cw1200_txpriv *txpriv; + + pr_debug("[TX] TX confirm: %d, %d.\n", + arg->status, arg->ack_failures); + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* STA is stopped. */ + return; + } + + if (WARN_ON(queue_id >= 4)) + return; + + if (arg->status) + pr_debug("TX failed: %d.\n", arg->status); + + if ((arg->status == WSM_REQUEUE) && + (arg->flags & WSM_TX_STATUS_REQUEUE)) { + /* "Requeue" means "implicit suspend" */ + struct wsm_suspend_resume suspend = { + .link_id = link_id, + .stop = 1, + .multicast = !link_id, + }; + cw1200_suspend_resume(priv, &suspend); + wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d). STAs asleep: 0x%.8X\n", + link_id, + cw1200_queue_get_generation(arg->packet_id) + 1, + priv->sta_asleep_mask); + cw1200_queue_requeue(queue, arg->packet_id); + spin_lock_bh(&priv->ps_state_lock); + if (!link_id) { + priv->buffered_multicasts = true; + if (priv->sta_asleep_mask) { + queue_work(priv->workqueue, + &priv->multicast_start_work); + } + } + spin_unlock_bh(&priv->ps_state_lock); + } else if (!cw1200_queue_get_skb(queue, arg->packet_id, + &skb, &txpriv)) { + struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb); + int tx_count = arg->ack_failures; + u8 ht_flags = 0; + int i; + + if (cw1200_ht_greenfield(&priv->ht_info)) + ht_flags |= IEEE80211_TX_RC_GREEN_FIELD; + + spin_lock(&priv->bss_loss_lock); + if (priv->bss_loss_state && + arg->packet_id == priv->bss_loss_confirm_id) { + if (arg->status) { + /* Recovery failed */ + __cw1200_cqm_bssloss_sm(priv, 0, 0, 1); + } else { + /* Recovery succeeded */ + __cw1200_cqm_bssloss_sm(priv, 0, 1, 0); + } + } + spin_unlock(&priv->bss_loss_lock); + + if (!arg->status) { + tx->flags |= IEEE80211_TX_STAT_ACK; + ++tx_count; + cw1200_debug_txed(priv); + if (arg->flags & WSM_TX_STATUS_AGGREGATION) { + /* Do not report aggregation to mac80211: + * it confuses minstrel a lot. 
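+ * (With IEEE80211_TX_STAT_AMPDU set, mac80211 expects + * status.ampdu_len and ampdu_ack_len to be filled in, and + * the firmware does not report those counts here.)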
+ */ + /* tx->flags |= IEEE80211_TX_STAT_AMPDU; */ + cw1200_debug_txed_agg(priv); + } + } else { + if (tx_count) + ++tx_count; + } + + for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) { + if (tx->status.rates[i].count >= tx_count) { + tx->status.rates[i].count = tx_count; + break; + } + tx_count -= tx->status.rates[i].count; + if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS) + tx->status.rates[i].flags |= ht_flags; + } + + for (++i; i < IEEE80211_TX_MAX_RATES; ++i) { + tx->status.rates[i].count = 0; + tx->status.rates[i].idx = -1; + } + + /* Pull off any crypto trailers that we added on */ + if (tx->control.hw_key) { + skb_trim(skb, skb->len - tx->control.hw_key->icv_len); + if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) + skb_trim(skb, skb->len - 8); /* MIC space */ + } + cw1200_queue_remove(queue, arg->packet_id); + } + /* XXX TODO: Only wake if there are pending transmits.. */ + cw1200_bh_wakeup(priv); +} + +static void cw1200_notify_buffered_tx(struct cw1200_common *priv, + struct sk_buff *skb, int link_id, int tid) +{ + struct ieee80211_sta *sta; + struct ieee80211_hdr *hdr; + u8 *buffered; + u8 still_buffered = 0; + + if (link_id && tid < CW1200_MAX_TID) { + buffered = priv->link_id_db + [link_id - 1].buffered; + + spin_lock_bh(&priv->ps_state_lock); + if (!WARN_ON(!buffered[tid])) + still_buffered = --buffered[tid]; + spin_unlock_bh(&priv->ps_state_lock); + + if (!still_buffered && tid < CW1200_MAX_TID) { + hdr = (struct ieee80211_hdr *)skb->data; + rcu_read_lock(); + sta = ieee80211_find_sta(priv->vif, hdr->addr1); + if (sta) + ieee80211_sta_set_buffered(sta, tid, false); + rcu_read_unlock(); + } + } +} + +void cw1200_skb_dtor(struct cw1200_common *priv, + struct sk_buff *skb, + const struct cw1200_txpriv *txpriv) +{ + skb_pull(skb, txpriv->offset); + if (txpriv->rate_id != CW1200_INVALID_RATE_ID) { + cw1200_notify_buffered_tx(priv, skb, + txpriv->raw_link_id, txpriv->tid); + tx_policy_put(priv, txpriv->rate_id); + } + ieee80211_tx_status(priv->hw, skb); +} + +void cw1200_rx_cb(struct cw1200_common *priv, + struct wsm_rx *arg, + int link_id, + struct sk_buff **skb_p) +{ + struct sk_buff *skb = *skb_p; + struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct cw1200_link_entry *entry = NULL; + unsigned long grace_period; + + bool early_data = false; + bool p2p = priv->vif && priv->vif->p2p; + size_t hdrlen; + hdr->flag = 0; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* STA is stopped. */ + goto drop; + } + + if (link_id && link_id <= CW1200_MAX_STA_IN_AP_MODE) { + entry = &priv->link_id_db[link_id - 1]; + if (entry->status == CW1200_LINK_SOFT && + ieee80211_is_data(frame->frame_control)) + early_data = true; + entry->timestamp = jiffies; + } else if (p2p && + ieee80211_is_action(frame->frame_control) && + (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) { + pr_debug("[RX] Going to MAP&RESET link ID\n"); + WARN_ON(work_pending(&priv->linkid_reset_work)); + memcpy(&priv->action_frame_sa[0], + ieee80211_get_SA(frame), ETH_ALEN); + priv->action_linkid = 0; + schedule_work(&priv->linkid_reset_work); + } + + if (link_id && p2p && + ieee80211_is_action(frame->frame_control) && + (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) { + /* Link ID already exists for the ACTION frame. 
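+ * (This is part of the WFD test case 6.1.10 workaround handled + * by cw1200_link_id_reset().)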
+ * Reset and Remap + */ + WARN_ON(work_pending(&priv->linkid_reset_work)); + memcpy(&priv->action_frame_sa[0], + ieee80211_get_SA(frame), ETH_ALEN); + priv->action_linkid = link_id; + schedule_work(&priv->linkid_reset_work); + } + if (arg->status) { + if (arg->status == WSM_STATUS_MICFAILURE) { + pr_debug("[RX] MIC failure.\n"); + hdr->flag |= RX_FLAG_MMIC_ERROR; + } else if (arg->status == WSM_STATUS_NO_KEY_FOUND) { + pr_debug("[RX] No key found.\n"); + goto drop; + } else { + pr_debug("[RX] Receive failure: %d.\n", + arg->status); + goto drop; + } + } + + if (skb->len < sizeof(struct ieee80211_pspoll)) { + wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is smaller than the IEEE header.\n"); + goto drop; + } + + if (ieee80211_is_pspoll(frame->frame_control)) + if (cw1200_handle_pspoll(priv, skb)) + goto drop; + + hdr->band = ((arg->channel_number & 0xff00) || + (arg->channel_number > 14)) ? + IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; + hdr->freq = ieee80211_channel_to_frequency( + arg->channel_number, + hdr->band); + + if (arg->rx_rate >= 14) { + hdr->flag |= RX_FLAG_HT; + hdr->rate_idx = arg->rx_rate - 14; + } else if (arg->rx_rate >= 4) { + hdr->rate_idx = arg->rx_rate - 2; + } else { + hdr->rate_idx = arg->rx_rate; + } + + hdr->signal = (s8)arg->rcpi_rssi; + hdr->antenna = 0; + + hdrlen = ieee80211_hdrlen(frame->frame_control); + + if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) { + size_t iv_len = 0, icv_len = 0; + + hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED; + + /* Oops... There is no fast way to ask mac80211 about + * IV/ICV lengths. Even the defines are not exposed. + */ + switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) { + case WSM_RX_STATUS_WEP: + iv_len = 4 /* WEP_IV_LEN */; + icv_len = 4 /* WEP_ICV_LEN */; + break; + case WSM_RX_STATUS_TKIP: + iv_len = 8 /* TKIP_IV_LEN */; + icv_len = 4 /* TKIP_ICV_LEN */ + + 8 /*MICHAEL_MIC_LEN*/; + hdr->flag |= RX_FLAG_MMIC_STRIPPED; + break; + case WSM_RX_STATUS_AES: + iv_len = 8 /* CCMP_HDR_LEN */; + icv_len = 8 /* CCMP_MIC_LEN */; + break; + case WSM_RX_STATUS_WAPI: + iv_len = 18 /* WAPI_HDR_LEN */; + icv_len = 16 /* WAPI_MIC_LEN */; + break; + default: + pr_warn("Unknown encryption type %d\n", + WSM_RX_STATUS_ENCRYPTION(arg->flags)); + goto drop; + } + + /* Firmware strips ICV in case of MIC failure. */ + if (arg->status == WSM_STATUS_MICFAILURE) + icv_len = 0; + + if (skb->len < hdrlen + iv_len + icv_len) { + wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. 
Size is smaller than the crypto headers.\n"); + goto drop; + } + + /* Remove IV, ICV and MIC */ + skb_trim(skb, skb->len - icv_len); + memmove(skb->data + iv_len, skb->data, hdrlen); + skb_pull(skb, iv_len); + } + + /* Remove the TSF from the end of the frame */ + if ((arg->flags & WSM_RX_STATUS_TSF_INCLUDED) && skb->len >= 8) { + memcpy(&hdr->mactime, skb->data + skb->len - 8, 8); + hdr->mactime = le64_to_cpu(hdr->mactime); + skb_trim(skb, skb->len - 8); + } else { + hdr->mactime = 0; + } + + cw1200_debug_rxed(priv); + if (arg->flags & WSM_RX_STATUS_AGGREGATE) + cw1200_debug_rxed_agg(priv); + + if (ieee80211_is_action(frame->frame_control) && + (arg->flags & WSM_RX_STATUS_ADDRESS1)) { + if (cw1200_handle_action_rx(priv, skb)) + return; + } else if (ieee80211_is_beacon(frame->frame_control) && + !arg->status && + !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid, + ETH_ALEN)) { + const u8 *tim_ie; + u8 *ies = ((struct ieee80211_mgmt *) + (skb->data))->u.beacon.variable; + size_t ies_len = skb->len - (ies - (u8 *)(skb->data)); + + tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len); + if (tim_ie) { + struct ieee80211_tim_ie *tim = + (struct ieee80211_tim_ie *)&tim_ie[2]; + + if (priv->join_dtim_period != tim->dtim_period) { + priv->join_dtim_period = tim->dtim_period; + queue_work(priv->workqueue, + &priv->set_beacon_wakeup_period_work); + } + } + + /* Disable beacon filter once we're associated... */ + if (priv->disable_beacon_filter && + (priv->vif->bss_conf.assoc || + priv->vif->bss_conf.ibss_joined)) { + priv->disable_beacon_filter = false; + queue_work(priv->workqueue, + &priv->update_filtering_work); + } + } + + /* Stay awake after a frame is received to give + * userspace a chance to react and acquire an appropriate + * wakelock. + */ + if (ieee80211_is_auth(frame->frame_control)) + grace_period = 5 * HZ; + else if (ieee80211_is_deauth(frame->frame_control)) + grace_period = 5 * HZ; + else + grace_period = 1 * HZ; + cw1200_pm_stay_awake(&priv->pm_state, grace_period); + + if (early_data) { + spin_lock_bh(&priv->ps_state_lock); + /* Double-check status with lock held */ + if (entry->status == CW1200_LINK_SOFT) + skb_queue_tail(&entry->rx_queue, skb); + else + ieee80211_rx_irqsafe(priv->hw, skb); + spin_unlock_bh(&priv->ps_state_lock); + } else { + ieee80211_rx_irqsafe(priv->hw, skb); + } + *skb_p = NULL; + + return; + +drop: + /* TODO: update failure counters */ + return; +} + +/* ******************************************************************** */ +/* Security */ + +int cw1200_alloc_key(struct cw1200_common *priv) +{ + int idx; + + idx = ffs(~priv->key_map) - 1; + if (idx < 0 || idx > WSM_KEY_MAX_INDEX) + return -1; + + priv->key_map |= BIT(idx); + priv->keys[idx].index = idx; + return idx; +} + +void cw1200_free_key(struct cw1200_common *priv, int idx) +{ + BUG_ON(!(priv->key_map & BIT(idx))); + memset(&priv->keys[idx], 0, sizeof(priv->keys[idx])); + priv->key_map &= ~BIT(idx); +} + +void cw1200_free_keys(struct cw1200_common *priv) +{ + memset(&priv->keys, 0, sizeof(priv->keys)); + priv->key_map = 0; +} + +int cw1200_upload_keys(struct cw1200_common *priv) +{ + int idx, ret = 0; + for (idx = 0; idx <= WSM_KEY_MAX_INDEX; ++idx) + if (priv->key_map & BIT(idx)) { + ret = wsm_add_key(priv, &priv->keys[idx]); + if (ret < 0) + break; + } + return ret; +} + +/* Workaround for WFD test case 6.1.10 */ +void cw1200_link_id_reset(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, linkid_reset_work); + int temp_linkid; + + if 
(!priv->action_linkid) { + /* In GO mode we can receive ACTION frames without a linkID */ + temp_linkid = cw1200_alloc_link_id(priv, + &priv->action_frame_sa[0]); + WARN_ON(!temp_linkid); + if (temp_linkid) { + /* Make sure we execute the WQ */ + flush_workqueue(priv->workqueue); + /* Release the link ID */ + spin_lock_bh(&priv->ps_state_lock); + priv->link_id_db[temp_linkid - 1].prev_status = + priv->link_id_db[temp_linkid - 1].status; + priv->link_id_db[temp_linkid - 1].status = + CW1200_LINK_RESET; + spin_unlock_bh(&priv->ps_state_lock); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, + &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + } + } else { + spin_lock_bh(&priv->ps_state_lock); + priv->link_id_db[priv->action_linkid - 1].prev_status = + priv->link_id_db[priv->action_linkid - 1].status; + priv->link_id_db[priv->action_linkid - 1].status = + CW1200_LINK_RESET_REMAP; + spin_unlock_bh(&priv->ps_state_lock); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + flush_workqueue(priv->workqueue); + } +} + +int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac) +{ + int i, ret = 0; + spin_lock_bh(&priv->ps_state_lock); + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) && + priv->link_id_db[i].status) { + priv->link_id_db[i].timestamp = jiffies; + ret = i + 1; + break; + } + } + spin_unlock_bh(&priv->ps_state_lock); + return ret; +} + +int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac) +{ + int i, ret = 0; + unsigned long max_inactivity = 0; + unsigned long now = jiffies; + + spin_lock_bh(&priv->ps_state_lock); + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + if (!priv->link_id_db[i].status) { + ret = i + 1; + break; + } else if (priv->link_id_db[i].status != CW1200_LINK_HARD && + !priv->tx_queue_stats.link_map_cache[i + 1]) { + unsigned long inactivity = + now - priv->link_id_db[i].timestamp; + if (inactivity < max_inactivity) + continue; + max_inactivity = inactivity; + ret = i + 1; + } + } + if (ret) { + struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1]; + pr_debug("[AP] STA added, link_id: %d\n", ret); + entry->status = CW1200_LINK_RESERVE; + memcpy(&entry->mac, mac, ETH_ALEN); + memset(&entry->buffered, 0, CW1200_MAX_TID); + skb_queue_head_init(&entry->rx_queue); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + } else { + wiphy_info(priv->hw->wiphy, + "[AP] Early: no more link IDs available.\n"); + } + + spin_unlock_bh(&priv->ps_state_lock); + return ret; +} + +void cw1200_link_id_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, link_id_work); + wsm_flush_tx(priv); + cw1200_link_id_gc_work(&priv->link_id_gc_work.work); + wsm_unlock_tx(priv); +} + +void cw1200_link_id_gc_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, link_id_gc_work.work); + struct wsm_reset reset = { + .reset_statistics = false, + }; + struct wsm_map_link map_link = { + .link_id = 0, + }; + unsigned long now = jiffies; + unsigned long next_gc = -1; + long ttl; + bool need_reset; + u32 mask; + int i; + + if (priv->join_status != CW1200_JOIN_STATUS_AP) + return; + + wsm_lock_tx(priv); + spin_lock_bh(&priv->ps_state_lock); + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + need_reset = false; + mask = BIT(i + 1); + if (priv->link_id_db[i].status == 
CW1200_LINK_RESERVE || + (priv->link_id_db[i].status == CW1200_LINK_HARD && + !(priv->link_id_map & mask))) { + if (priv->link_id_map & mask) { + priv->sta_asleep_mask &= ~mask; + priv->pspoll_mask &= ~mask; + need_reset = true; + } + priv->link_id_map |= mask; + if (priv->link_id_db[i].status != CW1200_LINK_HARD) + priv->link_id_db[i].status = CW1200_LINK_SOFT; + memcpy(map_link.mac_addr, priv->link_id_db[i].mac, + ETH_ALEN); + spin_unlock_bh(&priv->ps_state_lock); + if (need_reset) { + reset.link_id = i + 1; + wsm_reset(priv, &reset); + } + map_link.link_id = i + 1; + wsm_map_link(priv, &map_link); + next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT); + spin_lock_bh(&priv->ps_state_lock); + } else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) { + ttl = priv->link_id_db[i].timestamp - now + + CW1200_LINK_ID_GC_TIMEOUT; + if (ttl <= 0) { + need_reset = true; + priv->link_id_db[i].status = CW1200_LINK_OFF; + priv->link_id_map &= ~mask; + priv->sta_asleep_mask &= ~mask; + priv->pspoll_mask &= ~mask; + memset(map_link.mac_addr, 0, ETH_ALEN); + spin_unlock_bh(&priv->ps_state_lock); + reset.link_id = i + 1; + wsm_reset(priv, &reset); + spin_lock_bh(&priv->ps_state_lock); + } else { + next_gc = min_t(unsigned long, next_gc, ttl); + } + } else if (priv->link_id_db[i].status == CW1200_LINK_RESET || + priv->link_id_db[i].status == + CW1200_LINK_RESET_REMAP) { + int status = priv->link_id_db[i].status; + priv->link_id_db[i].status = + priv->link_id_db[i].prev_status; + priv->link_id_db[i].timestamp = now; + reset.link_id = i + 1; + spin_unlock_bh(&priv->ps_state_lock); + wsm_reset(priv, &reset); + if (status == CW1200_LINK_RESET_REMAP) { + memcpy(map_link.mac_addr, + priv->link_id_db[i].mac, + ETH_ALEN); + map_link.link_id = i + 1; + wsm_map_link(priv, &map_link); + next_gc = min(next_gc, + CW1200_LINK_ID_GC_TIMEOUT); + } + spin_lock_bh(&priv->ps_state_lock); + } + if (need_reset) { + skb_queue_purge(&priv->link_id_db[i].rx_queue); + pr_debug("[AP] STA removed, link_id: %d\n", + reset.link_id); + } + } + spin_unlock_bh(&priv->ps_state_lock); + if (next_gc != -1) + queue_delayed_work(priv->workqueue, + &priv->link_id_gc_work, next_gc); + wsm_unlock_tx(priv); +} diff --git a/drivers/net/wireless/cw1200/txrx.h b/drivers/net/wireless/cw1200/txrx.h new file mode 100644 index 000000000000..492a4e14213b --- /dev/null +++ b/drivers/net/wireless/cw1200/txrx.h @@ -0,0 +1,106 @@ +/* + * Datapath interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef CW1200_TXRX_H +#define CW1200_TXRX_H + +#include <linux/list.h> + +/* extern */ struct ieee80211_hw; +/* extern */ struct sk_buff; +/* extern */ struct wsm_tx; +/* extern */ struct wsm_rx; +/* extern */ struct wsm_tx_confirm; +/* extern */ struct cw1200_txpriv; + +struct tx_policy { + union { + __le32 tbl[3]; + u8 raw[12]; + }; + u8 defined; + u8 usage_count; + u8 retry_count; + u8 uploaded; +}; + +struct tx_policy_cache_entry { + struct tx_policy policy; + struct list_head link; +}; + +#define TX_POLICY_CACHE_SIZE (8) +struct tx_policy_cache { + struct tx_policy_cache_entry cache[TX_POLICY_CACHE_SIZE]; + struct list_head used; + struct list_head free; + spinlock_t lock; /* Protect policy cache */ +}; + +/* ******************************************************************** */ +/* TX policy cache */ +/* The TX policy cache exists because of an overcomplicated WSM API: + * the device does not accept a per-PDU tx retry sequence. + * It uses a "tx retry policy id" instead, so the driver has to keep + * linux tx retry sequences in sync with the retry policy table in + * the device. + */ +void tx_policy_init(struct cw1200_common *priv); +void tx_policy_upload_work(struct work_struct *work); +void tx_policy_clean(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* TX implementation */ + +u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, + u32 rates); +void cw1200_tx(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb); +void cw1200_skb_dtor(struct cw1200_common *priv, + struct sk_buff *skb, + const struct cw1200_txpriv *txpriv); + +/* ******************************************************************** */ +/* WSM callbacks */ + +void cw1200_tx_confirm_cb(struct cw1200_common *priv, + int link_id, + struct wsm_tx_confirm *arg); +void cw1200_rx_cb(struct cw1200_common *priv, + struct wsm_rx *arg, + int link_id, + struct sk_buff **skb_p); + +/* ******************************************************************** */ +/* Timeout */ + +void cw1200_tx_timeout(struct work_struct *work); + +/* ******************************************************************** */ +/* Security */ +int cw1200_alloc_key(struct cw1200_common *priv); +void cw1200_free_key(struct cw1200_common *priv, int idx); +void cw1200_free_keys(struct cw1200_common *priv); +int cw1200_upload_keys(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* Workaround for WFD test case 6.1.10 */ +void cw1200_link_id_reset(struct work_struct *work); + +#define CW1200_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ)) + +int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac); +int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac); +void cw1200_link_id_work(struct work_struct *work); +void cw1200_link_id_gc_work(struct work_struct *work); + + +#endif /* CW1200_TXRX_H */ diff --git a/drivers/net/wireless/cw1200/wsm.c b/drivers/net/wireless/cw1200/wsm.c new file mode 100644 index 000000000000..d95094fdcc50 --- /dev/null +++ b/drivers/net/wireless/cw1200/wsm.c @@ -0,0 +1,1823 @@ +/* + * WSM host interface (HI) implementation for + * ST-Ericsson CW1200 mac80211 drivers. + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/skbuff.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/random.h> + +#include "cw1200.h" +#include "wsm.h" +#include "bh.h" +#include "sta.h" +#include "debug.h" + +#define WSM_CMD_TIMEOUT (2 * HZ) /* With respect to interrupt loss */ +#define WSM_CMD_START_TIMEOUT (7 * HZ) +#define WSM_CMD_RESET_TIMEOUT (3 * HZ) /* 2 sec. timeout was observed. */ +#define WSM_CMD_MAX_TIMEOUT (3 * HZ) + +#define WSM_SKIP(buf, size) \ + do { \ + if ((buf)->data + size > (buf)->end) \ + goto underflow; \ + (buf)->data += size; \ + } while (0) + +#define WSM_GET(buf, ptr, size) \ + do { \ + if ((buf)->data + size > (buf)->end) \ + goto underflow; \ + memcpy(ptr, (buf)->data, size); \ + (buf)->data += size; \ + } while (0) + +#define __WSM_GET(buf, type, cvt) \ + ({ \ + type val; \ + if ((buf)->data + sizeof(type) > (buf)->end) \ + goto underflow; \ + val = cvt(*(type *)(buf)->data); \ + (buf)->data += sizeof(type); \ + val; \ + }) + +#define WSM_GET8(buf) __WSM_GET(buf, u8, (u8)) +#define WSM_GET16(buf) __WSM_GET(buf, u16, __le16_to_cpu) +#define WSM_GET32(buf) __WSM_GET(buf, u32, __le32_to_cpu) + +#define WSM_PUT(buf, ptr, size) \ + do { \ + if ((buf)->data + size > (buf)->end) \ + if (wsm_buf_reserve((buf), size)) \ + goto nomem; \ + memcpy((buf)->data, ptr, size); \ + (buf)->data += size; \ + } while (0) + +#define __WSM_PUT(buf, val, type, cvt) \ + do { \ + if ((buf)->data + sizeof(type) > (buf)->end) \ + if (wsm_buf_reserve((buf), sizeof(type))) \ + goto nomem; \ + *(type *)(buf)->data = cvt(val); \ + (buf)->data += sizeof(type); \ + } while (0) + +#define WSM_PUT8(buf, val) __WSM_PUT(buf, val, u8, (u8)) +#define WSM_PUT16(buf, val) __WSM_PUT(buf, val, u16, __cpu_to_le16) +#define WSM_PUT32(buf, val) __WSM_PUT(buf, val, u32, __cpu_to_le32) + +static void wsm_buf_reset(struct wsm_buf *buf); +static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size); + +static int wsm_cmd_send(struct cw1200_common *priv, + struct wsm_buf *buf, + void *arg, u16 cmd, long tmo); + +#define wsm_cmd_lock(__priv) mutex_lock(&((__priv)->wsm_cmd_mux)) +#define wsm_cmd_unlock(__priv) mutex_unlock(&((__priv)->wsm_cmd_mux)) + +/* ******************************************************************** */ +/* WSM API implementation */ + +static int wsm_generic_confirm(struct cw1200_common *priv, + void *arg, + struct wsm_buf *buf) +{ + u32 status = WSM_GET32(buf); + if (status != WSM_STATUS_SUCCESS) + return -EINVAL; + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +int wsm_configuration(struct cw1200_common *priv, struct wsm_configuration *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT32(buf, arg->dot11MaxTransmitMsduLifeTime); + WSM_PUT32(buf, arg->dot11MaxReceiveLifeTime); + WSM_PUT32(buf, arg->dot11RtsThreshold); + + /* DPD block. 
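+ * Serialized as: u16 total length (dpdData_size + 12 bytes of + * header), u16 DPD version, the 6-byte station ID, u16 flags and + * then the raw DPD data.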
*/ + WSM_PUT16(buf, arg->dpdData_size + 12); + WSM_PUT16(buf, 1); /* DPD version */ + WSM_PUT(buf, arg->dot11StationId, ETH_ALEN); + WSM_PUT16(buf, 5); /* DPD flags */ + WSM_PUT(buf, arg->dpdData, arg->dpdData_size); + + ret = wsm_cmd_send(priv, buf, arg, + WSM_CONFIGURATION_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +static int wsm_configuration_confirm(struct cw1200_common *priv, + struct wsm_configuration *arg, + struct wsm_buf *buf) +{ + int i; + int status; + + status = WSM_GET32(buf); + if (WARN_ON(status != WSM_STATUS_SUCCESS)) + return -EINVAL; + + WSM_GET(buf, arg->dot11StationId, ETH_ALEN); + arg->dot11FrequencyBandsSupported = WSM_GET8(buf); + WSM_SKIP(buf, 1); + arg->supportedRateMask = WSM_GET32(buf); + for (i = 0; i < 2; ++i) { + arg->txPowerRange[i].min_power_level = WSM_GET32(buf); + arg->txPowerRange[i].max_power_level = WSM_GET32(buf); + arg->txPowerRange[i].stepping = WSM_GET32(buf); + } + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +/* ******************************************************************** */ + +int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + u16 cmd = WSM_RESET_REQ_ID | WSM_TX_LINK_ID(arg->link_id); + + wsm_cmd_lock(priv); + + WSM_PUT32(buf, arg->reset_statistics ? 0 : 1); + ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_RESET_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +struct wsm_mib { + u16 mib_id; + void *buf; + size_t buf_size; +}; + +int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *_buf, + size_t buf_size) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + struct wsm_mib mib_buf = { + .mib_id = mib_id, + .buf = _buf, + .buf_size = buf_size, + }; + wsm_cmd_lock(priv); + + WSM_PUT16(buf, mib_id); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, &mib_buf, + WSM_READ_MIB_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +static int wsm_read_mib_confirm(struct cw1200_common *priv, + struct wsm_mib *arg, + struct wsm_buf *buf) +{ + u16 size; + if (WARN_ON(WSM_GET32(buf) != WSM_STATUS_SUCCESS)) + return -EINVAL; + + if (WARN_ON(WSM_GET16(buf) != arg->mib_id)) + return -EINVAL; + + size = WSM_GET16(buf); + if (size > arg->buf_size) + size = arg->buf_size; + + WSM_GET(buf, arg->buf, size); + arg->buf_size = size; + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +/* ******************************************************************** */ + +int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *_buf, + size_t buf_size) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + struct wsm_mib mib_buf = { + .mib_id = mib_id, + .buf = _buf, + .buf_size = buf_size, + }; + + wsm_cmd_lock(priv); + + WSM_PUT16(buf, mib_id); + WSM_PUT16(buf, buf_size); + WSM_PUT(buf, _buf, buf_size); + + ret = wsm_cmd_send(priv, buf, &mib_buf, + WSM_WRITE_MIB_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +static int wsm_write_mib_confirm(struct cw1200_common *priv, + struct wsm_mib *arg, + struct wsm_buf *buf) +{ + int ret; + + ret = wsm_generic_confirm(priv, arg, buf); + if (ret) + return ret; + + if (arg->mib_id == WSM_MIB_ID_OPERATIONAL_POWER_MODE) { + /* OperationalMode: 
update PM status. */ + const char *p = arg->buf; + cw1200_enable_powersave(priv, (p[0] & 0x0F) ? true : false); + } + return 0; +} + +/* ******************************************************************** */ + +int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg) +{ + int i; + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + if (arg->num_channels > 48) + return -EINVAL; + + if (arg->num_ssids > 2) + return -EINVAL; + + if (arg->band > 1) + return -EINVAL; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->band); + WSM_PUT8(buf, arg->type); + WSM_PUT8(buf, arg->flags); + WSM_PUT8(buf, arg->max_tx_rate); + WSM_PUT32(buf, arg->auto_scan_interval); + WSM_PUT8(buf, arg->num_probes); + WSM_PUT8(buf, arg->num_channels); + WSM_PUT8(buf, arg->num_ssids); + WSM_PUT8(buf, arg->probe_delay); + + for (i = 0; i < arg->num_channels; ++i) { + WSM_PUT16(buf, arg->ch[i].number); + WSM_PUT16(buf, 0); + WSM_PUT32(buf, arg->ch[i].min_chan_time); + WSM_PUT32(buf, arg->ch[i].max_chan_time); + WSM_PUT32(buf, 0); + } + + for (i = 0; i < arg->num_ssids; ++i) { + WSM_PUT32(buf, arg->ssids[i].length); + WSM_PUT(buf, &arg->ssids[i].ssid[0], + sizeof(arg->ssids[i].ssid)); + } + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_START_SCAN_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_stop_scan(struct cw1200_common *priv) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + wsm_cmd_lock(priv); + ret = wsm_cmd_send(priv, buf, NULL, + WSM_STOP_SCAN_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; +} + + +static int wsm_tx_confirm(struct cw1200_common *priv, + struct wsm_buf *buf, + int link_id) +{ + struct wsm_tx_confirm tx_confirm; + + tx_confirm.packet_id = WSM_GET32(buf); + tx_confirm.status = WSM_GET32(buf); + tx_confirm.tx_rate = WSM_GET8(buf); + tx_confirm.ack_failures = WSM_GET8(buf); + tx_confirm.flags = WSM_GET16(buf); + tx_confirm.media_delay = WSM_GET32(buf); + tx_confirm.tx_queue_delay = WSM_GET32(buf); + + cw1200_tx_confirm_cb(priv, link_id, &tx_confirm); + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +static int wsm_multi_tx_confirm(struct cw1200_common *priv, + struct wsm_buf *buf, int link_id) +{ + int ret; + int count; + int i; + + count = WSM_GET32(buf); + if (WARN_ON(count <= 0)) + return -EINVAL; + + if (count > 1) { + /* We already released one buffer, now for the rest */ + ret = wsm_release_tx_buffer(priv, count - 1); + if (ret < 0) + return ret; + else if (ret > 0) + cw1200_bh_wakeup(priv); + } + + cw1200_debug_txed_multi(priv, count); + for (i = 0; i < count; ++i) { + ret = wsm_tx_confirm(priv, buf, link_id); + if (ret) + return ret; + } + return ret; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +/* ******************************************************************** */ + +static int wsm_join_confirm(struct cw1200_common *priv, + struct wsm_join_cnf *arg, + struct wsm_buf *buf) +{ + arg->status = WSM_GET32(buf); + if (WARN_ON(arg->status != WSM_STATUS_SUCCESS)) + return -EINVAL; + + arg->min_power_level = WSM_GET32(buf); + arg->max_power_level = WSM_GET32(buf); + + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +int wsm_join(struct cw1200_common *priv, struct wsm_join *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + struct wsm_join_cnf resp; + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->band); + WSM_PUT16(buf, 
arg->channel_number); + WSM_PUT(buf, &arg->bssid[0], sizeof(arg->bssid)); + WSM_PUT16(buf, arg->atim_window); + WSM_PUT8(buf, arg->preamble_type); + WSM_PUT8(buf, arg->probe_for_join); + WSM_PUT8(buf, arg->dtim_period); + WSM_PUT8(buf, arg->flags); + WSM_PUT32(buf, arg->ssid_len); + WSM_PUT(buf, &arg->ssid[0], sizeof(arg->ssid)); + WSM_PUT32(buf, arg->beacon_interval); + WSM_PUT32(buf, arg->basic_rate_set); + + priv->tx_burst_idx = -1; + ret = wsm_cmd_send(priv, buf, &resp, + WSM_JOIN_REQ_ID, WSM_CMD_TIMEOUT); + /* TODO: Update state based on resp.min|max_power_level */ + + priv->join_complete_status = resp.status; + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_bss_params(struct cw1200_common *priv, + const struct wsm_set_bss_params *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, (arg->reset_beacon_loss ? 0x1 : 0)); + WSM_PUT8(buf, arg->beacon_lost_count); + WSM_PUT16(buf, arg->aid); + WSM_PUT32(buf, arg->operational_rate_set); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_SET_BSS_PARAMS_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT(buf, arg, sizeof(*arg)); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_ADD_KEY_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_remove_key(struct cw1200_common *priv, const struct wsm_remove_key *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->index); + WSM_PUT8(buf, 0); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_REMOVE_KEY_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_tx_queue_params(struct cw1200_common *priv, + const struct wsm_set_tx_queue_params *arg, u8 id) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1}; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, queue_id_to_wmm_aci[id]); + WSM_PUT8(buf, 0); + WSM_PUT8(buf, arg->ackPolicy); + WSM_PUT8(buf, 0); + WSM_PUT32(buf, arg->maxTransmitLifetime); + WSM_PUT16(buf, arg->allowedMediumTime); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, NULL, 0x0012, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_edca_params(struct cw1200_common *priv, + const struct wsm_edca_params *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + /* Implemented according to specification. 
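+ * + * Note the field-major layout: all four cwmin values first, then + * the cwmax, AIFS, TXOP limit and max RX lifetime groups, each + * written for queues 3 down to 0.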
*/ + + WSM_PUT16(buf, arg->params[3].cwmin); + WSM_PUT16(buf, arg->params[2].cwmin); + WSM_PUT16(buf, arg->params[1].cwmin); + WSM_PUT16(buf, arg->params[0].cwmin); + + WSM_PUT16(buf, arg->params[3].cwmax); + WSM_PUT16(buf, arg->params[2].cwmax); + WSM_PUT16(buf, arg->params[1].cwmax); + WSM_PUT16(buf, arg->params[0].cwmax); + + WSM_PUT8(buf, arg->params[3].aifns); + WSM_PUT8(buf, arg->params[2].aifns); + WSM_PUT8(buf, arg->params[1].aifns); + WSM_PUT8(buf, arg->params[0].aifns); + + WSM_PUT16(buf, arg->params[3].txop_limit); + WSM_PUT16(buf, arg->params[2].txop_limit); + WSM_PUT16(buf, arg->params[1].txop_limit); + WSM_PUT16(buf, arg->params[0].txop_limit); + + WSM_PUT32(buf, arg->params[3].max_rx_lifetime); + WSM_PUT32(buf, arg->params[2].max_rx_lifetime); + WSM_PUT32(buf, arg->params[1].max_rx_lifetime); + WSM_PUT32(buf, arg->params[0].max_rx_lifetime); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_EDCA_PARAMS_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_switch_channel(struct cw1200_common *priv, + const struct wsm_switch_channel *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->switch_count); + WSM_PUT16(buf, arg->channel_number); + + priv->channel_switch_in_progress = 1; + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_SWITCH_CHANNEL_REQ_ID, WSM_CMD_TIMEOUT); + if (ret) + priv->channel_switch_in_progress = 0; + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + priv->ps_mode_switch_in_progress = 1; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->fast_psm_idle_period); + WSM_PUT8(buf, arg->ap_psm_change_period); + WSM_PUT8(buf, arg->min_auto_pspoll_period); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_SET_PM_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->band); + WSM_PUT16(buf, arg->channel_number); + WSM_PUT32(buf, arg->ct_window); + WSM_PUT32(buf, arg->beacon_interval); + WSM_PUT8(buf, arg->dtim_period); + WSM_PUT8(buf, arg->preamble); + WSM_PUT8(buf, arg->probe_delay); + WSM_PUT8(buf, arg->ssid_len); + WSM_PUT(buf, arg->ssid, sizeof(arg->ssid)); + WSM_PUT32(buf, arg->basic_rate_set); + + priv->tx_burst_idx = -1; + ret = wsm_cmd_send(priv, buf, NULL, + WSM_START_REQ_ID, WSM_CMD_START_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_beacon_transmit(struct cw1200_common *priv, + const struct wsm_beacon_transmit *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT32(buf, arg->enable_beaconing ? 
1 : 0); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_BEACON_TRANSMIT_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_start_find(struct cw1200_common *priv) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + ret = wsm_cmd_send(priv, buf, NULL, 0x0019, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; +} + +/* ******************************************************************** */ + +int wsm_stop_find(struct cw1200_common *priv) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + ret = wsm_cmd_send(priv, buf, NULL, 0x001A, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; +} + +/* ******************************************************************** */ + +int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + u16 cmd = 0x001C | WSM_TX_LINK_ID(arg->link_id); + + wsm_cmd_lock(priv); + + WSM_PUT(buf, &arg->mac_addr[0], sizeof(arg->mac_addr)); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_update_ie(struct cw1200_common *priv, + const struct wsm_update_ie *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT16(buf, arg->what); + WSM_PUT16(buf, arg->count); + WSM_PUT(buf, arg->ies, arg->length); + + ret = wsm_cmd_send(priv, buf, NULL, 0x001B, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ +int wsm_set_probe_responder(struct cw1200_common *priv, bool enable) +{ + priv->rx_filter.probeResponder = enable; + return wsm_set_rx_filter(priv, &priv->rx_filter); +} + +/* ******************************************************************** */ +/* WSM indication events implementation */ +const char * const cw1200_fw_types[] = { + "ETF", + "WFM", + "WSM", + "HI test", + "Platform test" +}; + +static int wsm_startup_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + priv->wsm_caps.input_buffers = WSM_GET16(buf); + priv->wsm_caps.input_buffer_size = WSM_GET16(buf); + priv->wsm_caps.hw_id = WSM_GET16(buf); + priv->wsm_caps.hw_subid = WSM_GET16(buf); + priv->wsm_caps.status = WSM_GET16(buf); + priv->wsm_caps.fw_cap = WSM_GET16(buf); + priv->wsm_caps.fw_type = WSM_GET16(buf); + priv->wsm_caps.fw_api = WSM_GET16(buf); + priv->wsm_caps.fw_build = WSM_GET16(buf); + priv->wsm_caps.fw_ver = WSM_GET16(buf); + WSM_GET(buf, priv->wsm_caps.fw_label, sizeof(priv->wsm_caps.fw_label)); + priv->wsm_caps.fw_label[sizeof(priv->wsm_caps.fw_label) - 1] = 0; /* Do not trust FW too much... 
*/ + + if (WARN_ON(priv->wsm_caps.status)) + return -EINVAL; + + if (WARN_ON(priv->wsm_caps.fw_type > 4)) + return -EINVAL; + + pr_info("CW1200 WSM init done.\n" + " Input buffers: %d x %d bytes\n" + " Hardware: %d.%d\n" + " %s firmware [%s], ver: %d, build: %d," + " api: %d, cap: 0x%.4X\n", + priv->wsm_caps.input_buffers, + priv->wsm_caps.input_buffer_size, + priv->wsm_caps.hw_id, priv->wsm_caps.hw_subid, + cw1200_fw_types[priv->wsm_caps.fw_type], + priv->wsm_caps.fw_label, priv->wsm_caps.fw_ver, + priv->wsm_caps.fw_build, + priv->wsm_caps.fw_api, priv->wsm_caps.fw_cap); + + /* Disable unsupported frequency bands */ + if (!(priv->wsm_caps.fw_cap & 0x1)) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; + if (!(priv->wsm_caps.fw_cap & 0x2)) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; + + priv->firmware_ready = 1; + wake_up(&priv->wsm_startup_done); + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +static int wsm_receive_indication(struct cw1200_common *priv, + int link_id, + struct wsm_buf *buf, + struct sk_buff **skb_p) +{ + struct wsm_rx rx; + struct ieee80211_hdr *hdr; + size_t hdr_len; + __le16 fctl; + + rx.status = WSM_GET32(buf); + rx.channel_number = WSM_GET16(buf); + rx.rx_rate = WSM_GET8(buf); + rx.rcpi_rssi = WSM_GET8(buf); + rx.flags = WSM_GET32(buf); + + /* FW Workaround: Drop probe resp or + beacon when RSSI is 0 + */ + hdr = (struct ieee80211_hdr *)(*skb_p)->data; + + if (!rx.rcpi_rssi && + (ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control))) + return 0; + + /* If no RSSI subscription has been made, + * convert RCPI to RSSI here + */ + if (!priv->cqm_use_rssi) + rx.rcpi_rssi = rx.rcpi_rssi / 2 - 110; + + fctl = *(__le16 *)buf->data; + hdr_len = buf->data - buf->begin; + skb_pull(*skb_p, hdr_len); + if (!rx.status && ieee80211_is_deauth(fctl)) { + if (priv->join_status == CW1200_JOIN_STATUS_STA) { + /* Schedule unjoin work */ + pr_debug("[WSM] Issue unjoin command (RX).\n"); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, + &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } + } + cw1200_rx_cb(priv, &rx, link_id, skb_p); + if (*skb_p) + skb_push(*skb_p, hdr_len); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_event_indication(struct cw1200_common *priv, struct wsm_buf *buf) +{ + int first; + struct cw1200_wsm_event *event; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* STA is stopped.
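An aside on the RCPI handling in wsm_receive_indication() above: the device reports signal strength as unsigned RCPI in Q7.1 format (half-dB steps, see the struct wsm_rx comment in wsm.h below), so the conversion halves the value and rebases it to dBm. Worked through as a hypothetical helper:

	static inline int rcpi_to_dbm(u8 rcpi)
	{
		return rcpi / 2 - 110;   // e.g. RCPI 120 -> -50 dBm
	}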
*/ + return 0; + } + + event = kzalloc(sizeof(struct cw1200_wsm_event), GFP_KERNEL); + if (!event) + return -ENOMEM; + + event->evt.id = __le32_to_cpu(WSM_GET32(buf)); + event->evt.data = __le32_to_cpu(WSM_GET32(buf)); + + pr_debug("[WSM] Event: %d(%d)\n", + event->evt.id, event->evt.data); + + spin_lock(&priv->event_queue_lock); + first = list_empty(&priv->event_queue); + list_add_tail(&event->link, &priv->event_queue); + spin_unlock(&priv->event_queue_lock); + + if (first) + queue_work(priv->workqueue, &priv->event_handler); + + return 0; + +underflow: + kfree(event); + return -EINVAL; +} + +static int wsm_channel_switch_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + WARN_ON(WSM_GET32(buf)); + + priv->channel_switch_in_progress = 0; + wake_up(&priv->channel_switch_done); + + wsm_unlock_tx(priv); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_set_pm_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + /* TODO: Check buf (struct wsm_set_pm_complete) for validity */ + if (priv->ps_mode_switch_in_progress) { + priv->ps_mode_switch_in_progress = 0; + wake_up(&priv->ps_mode_switch_done); + } + return 0; +} + +static int wsm_scan_started(struct cw1200_common *priv, void *arg, + struct wsm_buf *buf) +{ + u32 status = WSM_GET32(buf); + if (status != WSM_STATUS_SUCCESS) { + cw1200_scan_failed_cb(priv); + return -EINVAL; + } + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +static int wsm_scan_complete_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + struct wsm_scan_complete arg; + arg.status = WSM_GET32(buf); + arg.psm = WSM_GET8(buf); + arg.num_channels = WSM_GET8(buf); + cw1200_scan_complete_cb(priv, &arg); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_join_complete_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + struct wsm_join_complete arg; + arg.status = WSM_GET32(buf); + pr_debug("[WSM] Join complete indication, status: %d\n", arg.status); + cw1200_join_complete_cb(priv, &arg); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_find_complete_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + pr_warn("Implement find_complete_indication\n"); + return 0; +} + +static int wsm_ba_timeout_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + u32 dummy; + u8 tid; + u8 dummy2; + u8 addr[ETH_ALEN]; + + dummy = WSM_GET32(buf); + tid = WSM_GET8(buf); + dummy2 = WSM_GET8(buf); + WSM_GET(buf, addr, ETH_ALEN); + + pr_info("BlockACK timeout, tid %d, addr %pM\n", + tid, addr); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_suspend_resume_indication(struct cw1200_common *priv, + int link_id, struct wsm_buf *buf) +{ + u32 flags; + struct wsm_suspend_resume arg; + + flags = WSM_GET32(buf); + arg.link_id = link_id; + arg.stop = !(flags & 1); + arg.multicast = !!(flags & 8); + arg.queue = (flags >> 1) & 3; + + cw1200_suspend_resume(priv, &arg); + + return 0; + +underflow: + return -EINVAL; +} + + +/* ******************************************************************** */ +/* WSM TX */ + +static int wsm_cmd_send(struct cw1200_common *priv, + struct wsm_buf *buf, + void *arg, u16 cmd, long tmo) +{ + size_t buf_len = buf->data - buf->begin; + int ret; + + /* Don't bother if we're dead. */ + if (priv->bh_error) { + ret = 0; + goto done; + } + + /* Block until the cmd buffer is completed. Tortuous. 
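In other words: busy-spin until the previous command's completion flag is set, then claim the slot. A minimal sketch of the same idea, with a cpu_relax() added for politeness (illustrative only; the driver keeps the open-coded loop below, and wsm_cmd_lock already serializes callers so the wait is expected to be short):

	spin_lock(&priv->wsm_cmd.lock);
	while (!priv->wsm_cmd.done) {
		spin_unlock(&priv->wsm_cmd.lock);
		cpu_relax();                    // assumption: short wait
		spin_lock(&priv->wsm_cmd.lock);
	}
	priv->wsm_cmd.done = 0;                 // claim the command slot
	spin_unlock(&priv->wsm_cmd.lock);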
*/ + spin_lock(&priv->wsm_cmd.lock); + while (!priv->wsm_cmd.done) { + spin_unlock(&priv->wsm_cmd.lock); + spin_lock(&priv->wsm_cmd.lock); + } + priv->wsm_cmd.done = 0; + spin_unlock(&priv->wsm_cmd.lock); + + if (cmd == WSM_WRITE_MIB_REQ_ID || + cmd == WSM_READ_MIB_REQ_ID) + pr_debug("[WSM] >>> 0x%.4X [MIB: 0x%.4X] (%zu)\n", + cmd, __le16_to_cpu(((__le16 *)buf->begin)[2]), + buf_len); + else + pr_debug("[WSM] >>> 0x%.4X (%zu)\n", cmd, buf_len); + + /* Due to buggy SPI on CW1200, we need to + * pad the message by a few bytes to ensure + * that it's completely received. + */ + buf_len += 4; + + /* Fill HI message header */ + /* BH will add sequence number */ + ((__le16 *)buf->begin)[0] = __cpu_to_le16(buf_len); + ((__le16 *)buf->begin)[1] = __cpu_to_le16(cmd); + + spin_lock(&priv->wsm_cmd.lock); + BUG_ON(priv->wsm_cmd.ptr); + priv->wsm_cmd.ptr = buf->begin; + priv->wsm_cmd.len = buf_len; + priv->wsm_cmd.arg = arg; + priv->wsm_cmd.cmd = cmd; + spin_unlock(&priv->wsm_cmd.lock); + + cw1200_bh_wakeup(priv); + + /* Wait for command completion */ + ret = wait_event_timeout(priv->wsm_cmd_wq, + priv->wsm_cmd.done, tmo); + + if (!ret && !priv->wsm_cmd.done) { + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.done = 1; + priv->wsm_cmd.ptr = NULL; + spin_unlock(&priv->wsm_cmd.lock); + if (priv->bh_error) { + /* Return ok to help system cleanup */ + ret = 0; + } else { + pr_err("CMD req (0x%04x) stuck in firmware, killing BH\n", priv->wsm_cmd.cmd); + print_hex_dump_bytes("REQDUMP: ", DUMP_PREFIX_NONE, + buf->begin, buf_len); + pr_err("Outstanding outgoing frames: %d\n", priv->hw_bufs_used); + + /* Kill BH thread to report the error to the top layer. */ + atomic_add(1, &priv->bh_term); + wake_up(&priv->bh_wq); + ret = -ETIMEDOUT; + } + } else { + spin_lock(&priv->wsm_cmd.lock); + BUG_ON(!priv->wsm_cmd.done); + ret = priv->wsm_cmd.ret; + spin_unlock(&priv->wsm_cmd.lock); + } +done: + wsm_buf_reset(buf); + return ret; +} + +/* ******************************************************************** */ +/* WSM TX port control */ + +void wsm_lock_tx(struct cw1200_common *priv) +{ + wsm_cmd_lock(priv); + if (atomic_add_return(1, &priv->tx_lock) == 1) { + if (wsm_flush_tx(priv)) + pr_debug("[WSM] TX is locked.\n"); + } + wsm_cmd_unlock(priv); +} + +void wsm_lock_tx_async(struct cw1200_common *priv) +{ + if (atomic_add_return(1, &priv->tx_lock) == 1) + pr_debug("[WSM] TX is locked (async).\n"); +} + +bool wsm_flush_tx(struct cw1200_common *priv) +{ + unsigned long timestamp = jiffies; + bool pending = false; + long timeout; + int i; + + /* Flush must be called with TX lock held. */ + BUG_ON(!atomic_read(&priv->tx_lock)); + + /* First check if we really need to do something. + * It is safe to use unprotected access, as hw_bufs_used + * can only decrement. + */ + if (!priv->hw_bufs_used) + return true; + + if (priv->bh_error) { + /* In case of failure do not wait for magic. */ + pr_err("[WSM] Fatal error occurred, will not flush TX.\n"); + return false; + } else { + /* Get a timestamp of "oldest" frame */ + for (i = 0; i < 4; ++i) + pending |= cw1200_queue_get_xmit_timestamp( + &priv->tx_queue[i], + &timestamp, 0xffffffff); + /* If there's nothing pending, we're good */ + if (!pending) + return true; + + timeout = timestamp + WSM_CMD_LAST_CHANCE_TIMEOUT - jiffies; + if (timeout < 0 || wait_event_timeout(priv->bh_evt_wq, + !priv->hw_bufs_used, + timeout) <= 0) { + /* Hmmm... Not good. Frames are stuck in firmware.
*/ + priv->bh_error = 1; + wiphy_err(priv->hw->wiphy, "[WSM] TX Frames (%d) stuck in firmware, killing BH\n", priv->hw_bufs_used); + wake_up(&priv->bh_wq); + return false; + } + + /* Ok, everything is flushed. */ + return true; + } +} + +void wsm_unlock_tx(struct cw1200_common *priv) +{ + int tx_lock; + tx_lock = atomic_sub_return(1, &priv->tx_lock); + BUG_ON(tx_lock < 0); + + if (tx_lock == 0) { + if (!priv->bh_error) + cw1200_bh_wakeup(priv); + pr_debug("[WSM] TX is unlocked.\n"); + } +} + +/* ******************************************************************** */ +/* WSM RX */ + +int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len) +{ + struct wsm_buf buf; + u32 reason; + u32 reg[18]; + char fname[48]; + unsigned int i; + + static const char * const reason_str[] = { + "undefined instruction", + "prefetch abort", + "data abort", + "unknown error", + }; + + buf.begin = buf.data = data; + buf.end = &buf.begin[len]; + + reason = WSM_GET32(&buf); + for (i = 0; i < ARRAY_SIZE(reg); ++i) + reg[i] = WSM_GET32(&buf); + WSM_GET(&buf, fname, sizeof(fname)); + + if (reason < 4) + wiphy_err(priv->hw->wiphy, + "Firmware exception: %s.\n", + reason_str[reason]); + else + wiphy_err(priv->hw->wiphy, + "Firmware assert at %.*s, line %d\n", + (int) sizeof(fname), fname, reg[1]); + + for (i = 0; i < 12; i += 4) + wiphy_err(priv->hw->wiphy, + "R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X,\n", + i + 0, reg[i + 0], i + 1, reg[i + 1], + i + 2, reg[i + 2], i + 3, reg[i + 3]); + wiphy_err(priv->hw->wiphy, + "R12: 0x%.8X, SP: 0x%.8X, LR: 0x%.8X, PC: 0x%.8X,\n", + reg[i + 0], reg[i + 1], reg[i + 2], reg[i + 3]); + i += 4; + wiphy_err(priv->hw->wiphy, + "CPSR: 0x%.8X, SPSR: 0x%.8X\n", + reg[i + 0], reg[i + 1]); + + print_hex_dump_bytes("R1: ", DUMP_PREFIX_NONE, + fname, sizeof(fname)); + return 0; + +underflow: + wiphy_err(priv->hw->wiphy, "Firmware exception.\n"); + print_hex_dump_bytes("Exception: ", DUMP_PREFIX_NONE, + data, len); + return -EINVAL; +} + +int wsm_handle_rx(struct cw1200_common *priv, u16 id, + struct wsm_hdr *wsm, struct sk_buff **skb_p) +{ + int ret = 0; + struct wsm_buf wsm_buf; + int link_id = (id >> 6) & 0x0F; + + /* Strip link id. */ + id &= ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX); + + wsm_buf.begin = (u8 *)&wsm[0]; + wsm_buf.data = (u8 *)&wsm[1]; + wsm_buf.end = &wsm_buf.begin[__le32_to_cpu(wsm->len)]; + + pr_debug("[WSM] <<< 0x%.4X (%td)\n", id, + wsm_buf.end - wsm_buf.begin); + + if (id == WSM_TX_CONFIRM_IND_ID) { + ret = wsm_tx_confirm(priv, &wsm_buf, link_id); + } else if (id == WSM_MULTI_TX_CONFIRM_ID) { + ret = wsm_multi_tx_confirm(priv, &wsm_buf, link_id); + } else if (id & 0x0400) { + void *wsm_arg; + u16 wsm_cmd; + + /* Do not trust FW too much. Protection against repeated + * response and race condition removal (see above). + */ + spin_lock(&priv->wsm_cmd.lock); + wsm_arg = priv->wsm_cmd.arg; + wsm_cmd = priv->wsm_cmd.cmd & + ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX); + priv->wsm_cmd.cmd = 0xFFFF; + spin_unlock(&priv->wsm_cmd.lock); + + if (WARN_ON((id & ~0x0400) != wsm_cmd)) { + /* Note that any non-zero is a fatal retcode. */ + ret = -EINVAL; + goto out; + } + + /* Note that wsm_arg can be NULL in case of timeout in + * wsm_cmd_send(). 
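For orientation, the 16-bit message id decoded here packs several subfields; a sketch using the definitions from wsm.h below (WSM_TX_LINK_ID, WSM_TX_SEQ):

	u16 id = 0x001C | WSM_TX_LINK_ID(2);  // map_link request for link 2
	int link = (id >> 6) & 0x0F;          // -> 2, as extracted above
	bool is_cnf = id & 0x0400;            // confirmation of a request
	bool is_ind = id & 0x0800;            // unsolicited indication
	// bits 13..15 carry the BH-assigned sequence number (WSM_TX_SEQ)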
+ */ + + switch (id) { + case WSM_READ_MIB_RESP_ID: + if (wsm_arg) + ret = wsm_read_mib_confirm(priv, wsm_arg, + &wsm_buf); + break; + case WSM_WRITE_MIB_RESP_ID: + if (wsm_arg) + ret = wsm_write_mib_confirm(priv, wsm_arg, + &wsm_buf); + break; + case WSM_START_SCAN_RESP_ID: + if (wsm_arg) + ret = wsm_scan_started(priv, wsm_arg, &wsm_buf); + break; + case WSM_CONFIGURATION_RESP_ID: + if (wsm_arg) + ret = wsm_configuration_confirm(priv, wsm_arg, + &wsm_buf); + break; + case WSM_JOIN_RESP_ID: + if (wsm_arg) + ret = wsm_join_confirm(priv, wsm_arg, &wsm_buf); + break; + case WSM_STOP_SCAN_RESP_ID: + case WSM_RESET_RESP_ID: + case WSM_ADD_KEY_RESP_ID: + case WSM_REMOVE_KEY_RESP_ID: + case WSM_SET_PM_RESP_ID: + case WSM_SET_BSS_PARAMS_RESP_ID: + case 0x0412: /* set_tx_queue_params */ + case WSM_EDCA_PARAMS_RESP_ID: + case WSM_SWITCH_CHANNEL_RESP_ID: + case WSM_START_RESP_ID: + case WSM_BEACON_TRANSMIT_RESP_ID: + case 0x0419: /* start_find */ + case 0x041A: /* stop_find */ + case 0x041B: /* update_ie */ + case 0x041C: /* map_link */ + WARN_ON(wsm_arg != NULL); + ret = wsm_generic_confirm(priv, wsm_arg, &wsm_buf); + if (ret) { + wiphy_warn(priv->hw->wiphy, + "wsm_generic_confirm failed for request 0x%04x.\n", + id & ~0x0400); + + /* often 0x407 and 0x410 occur, this means we're dead.. */ + if (priv->join_status >= CW1200_JOIN_STATUS_JOINING) { + wsm_lock_tx(priv); + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } + } + break; + default: + wiphy_warn(priv->hw->wiphy, + "Unrecognized confirmation 0x%04x\n", + id & ~0x0400); + } + + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.ret = ret; + priv->wsm_cmd.done = 1; + spin_unlock(&priv->wsm_cmd.lock); + + ret = 0; /* Error response from device should not stop BH. */ + + wake_up(&priv->wsm_cmd_wq); + } else if (id & 0x0800) { + switch (id) { + case WSM_STARTUP_IND_ID: + ret = wsm_startup_indication(priv, &wsm_buf); + break; + case WSM_RECEIVE_IND_ID: + ret = wsm_receive_indication(priv, link_id, + &wsm_buf, skb_p); + break; + case 0x0805: + ret = wsm_event_indication(priv, &wsm_buf); + break; + case WSM_SCAN_COMPLETE_IND_ID: + ret = wsm_scan_complete_indication(priv, &wsm_buf); + break; + case 0x0808: + ret = wsm_ba_timeout_indication(priv, &wsm_buf); + break; + case 0x0809: + ret = wsm_set_pm_indication(priv, &wsm_buf); + break; + case 0x080A: + ret = wsm_channel_switch_indication(priv, &wsm_buf); + break; + case 0x080B: + ret = wsm_find_complete_indication(priv, &wsm_buf); + break; + case 0x080C: + ret = wsm_suspend_resume_indication(priv, + link_id, &wsm_buf); + break; + case 0x080F: + ret = wsm_join_complete_indication(priv, &wsm_buf); + break; + default: + pr_warn("Unrecognized WSM ID %04x\n", id); + } + } else { + WARN_ON(1); + ret = -EINVAL; + } +out: + return ret; +} + +static bool wsm_handle_tx_data(struct cw1200_common *priv, + struct wsm_tx *wsm, + const struct ieee80211_tx_info *tx_info, + const struct cw1200_txpriv *txpriv, + struct cw1200_queue *queue) +{ + bool handled = false; + const struct ieee80211_hdr *frame = + (struct ieee80211_hdr *)&((u8 *)wsm)[txpriv->offset]; + __le16 fctl = frame->frame_control; + enum { + do_probe, + do_drop, + do_wep, + do_tx, + } action = do_tx; + + switch (priv->mode) { + case NL80211_IFTYPE_STATION: + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) + action = do_tx; + else if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) + action = do_drop; + break; + case NL80211_IFTYPE_AP: + if (!priv->join_status) { + action = do_drop; + } else if
(!(BIT(txpriv->raw_link_id) & + (BIT(0) | priv->link_id_map))) { + wiphy_warn(priv->hw->wiphy, + "A frame with expired link id is dropped.\n"); + action = do_drop; + } + if (cw1200_queue_get_generation(wsm->packet_id) > + CW1200_MAX_REQUEUE_ATTEMPTS) { + /* HACK!!! WSM324 firmware has tendency to requeue + * multicast frames in a loop, causing performance + * drop and high power consumption of the driver. + * In this situation it is better just to drop + * the problematic frame. + */ + wiphy_warn(priv->hw->wiphy, + "Too many attempts to requeue a frame; dropped.\n"); + action = do_drop; + } + break; + case NL80211_IFTYPE_ADHOC: + if (priv->join_status != CW1200_JOIN_STATUS_IBSS) + action = do_drop; + break; + case NL80211_IFTYPE_MESH_POINT: + action = do_tx; /* TODO: Test me! */ + break; + case NL80211_IFTYPE_MONITOR: + default: + action = do_drop; + break; + } + + if (action == do_tx) { + if (ieee80211_is_nullfunc(fctl)) { + spin_lock(&priv->bss_loss_lock); + if (priv->bss_loss_state) { + priv->bss_loss_confirm_id = wsm->packet_id; + wsm->queue_id = WSM_QUEUE_VOICE; + } + spin_unlock(&priv->bss_loss_lock); + } else if (ieee80211_is_probe_req(fctl)) { + action = do_probe; + } else if (ieee80211_is_deauth(fctl) && + priv->mode != NL80211_IFTYPE_AP) { + pr_debug("[WSM] Issue unjoin command due to tx deauth.\n"); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, + &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } else if (ieee80211_has_protected(fctl) && + tx_info->control.hw_key && + tx_info->control.hw_key->keyidx != priv->wep_default_key_id && + (tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 || + tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) { + action = do_wep; + } + } + + switch (action) { + case do_probe: + /* An interesting FW "feature". Device filters probe responses. + * The easiest way to get it back is to convert + * probe request into WSM start_scan command. 
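The conversion about to happen below uses the same hand-off idiom as the deauth and WEP paths in this function: take the TX lock without flushing, queue deferred work, and drop the lock again if the work item was already pending. Schematically (some_work is a placeholder, not a real field):

	wsm_lock_tx_async(priv);
	if (queue_work(priv->workqueue, &some_work) <= 0)
		wsm_unlock_tx(priv);    // already queued elsewhere; undo lock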
+ */ + pr_debug("[WSM] Convert probe request to scan.\n"); + wsm_lock_tx_async(priv); + priv->pending_frame_id = __le32_to_cpu(wsm->packet_id); + if (queue_delayed_work(priv->workqueue, + &priv->scan.probe_work, 0) <= 0) + wsm_unlock_tx(priv); + handled = true; + break; + case do_drop: + pr_debug("[WSM] Drop frame (0x%.4X).\n", fctl); + BUG_ON(cw1200_queue_remove(queue, + __le32_to_cpu(wsm->packet_id))); + handled = true; + break; + case do_wep: + pr_debug("[WSM] Issue set_default_wep_key.\n"); + wsm_lock_tx_async(priv); + priv->wep_default_key_id = tx_info->control.hw_key->keyidx; + priv->pending_frame_id = __le32_to_cpu(wsm->packet_id); + if (queue_work(priv->workqueue, &priv->wep_key_work) <= 0) + wsm_unlock_tx(priv); + handled = true; + break; + case do_tx: + pr_debug("[WSM] Transmit frame.\n"); + break; + default: + /* Do nothing */ + break; + } + return handled; +} + +static int cw1200_get_prio_queue(struct cw1200_common *priv, + u32 link_id_map, int *total) +{ + static const int urgent = BIT(CW1200_LINK_ID_AFTER_DTIM) | + BIT(CW1200_LINK_ID_UAPSD); + struct wsm_edca_queue_params *edca; + unsigned score, best = -1; + int winner = -1; + int queued; + int i; + + /* search for a winner using edca params */ + for (i = 0; i < 4; ++i) { + queued = cw1200_queue_get_num_queued(&priv->tx_queue[i], + link_id_map); + if (!queued) + continue; + *total += queued; + edca = &priv->edca.params[i]; + score = ((edca->aifns + edca->cwmin) << 16) + + ((edca->cwmax - edca->cwmin) * + (get_random_int() & 0xFFFF)); + if (score < best && (winner < 0 || i != 3)) { + best = score; + winner = i; + } + } + + /* override winner if bursting */ + if (winner >= 0 && priv->tx_burst_idx >= 0 && + winner != priv->tx_burst_idx && + !cw1200_queue_get_num_queued( + &priv->tx_queue[winner], + link_id_map & urgent) && + cw1200_queue_get_num_queued( + &priv->tx_queue[priv->tx_burst_idx], + link_id_map)) + winner = priv->tx_burst_idx; + + return winner; +} + +static int wsm_get_tx_queue_and_mask(struct cw1200_common *priv, + struct cw1200_queue **queue_p, + u32 *tx_allowed_mask_p, + bool *more) +{ + int idx; + u32 tx_allowed_mask; + int total = 0; + + /* Search for a queue with multicast frames buffered */ + if (priv->tx_multicast) { + tx_allowed_mask = BIT(CW1200_LINK_ID_AFTER_DTIM); + idx = cw1200_get_prio_queue(priv, + tx_allowed_mask, &total); + if (idx >= 0) { + *more = total > 1; + goto found; + } + } + + /* Search for unicast traffic */ + tx_allowed_mask = ~priv->sta_asleep_mask; + tx_allowed_mask |= BIT(CW1200_LINK_ID_UAPSD); + if (priv->sta_asleep_mask) { + tx_allowed_mask |= priv->pspoll_mask; + tx_allowed_mask &= ~BIT(CW1200_LINK_ID_AFTER_DTIM); + } else { + tx_allowed_mask |= BIT(CW1200_LINK_ID_AFTER_DTIM); + } + idx = cw1200_get_prio_queue(priv, + tx_allowed_mask, &total); + if (idx < 0) + return -ENOENT; + +found: + *queue_p = &priv->tx_queue[idx]; + *tx_allowed_mask_p = tx_allowed_mask; + return 0; +} + +int wsm_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst) +{ + struct wsm_tx *wsm = NULL; + struct ieee80211_tx_info *tx_info; + struct cw1200_queue *queue = NULL; + int queue_num; + u32 tx_allowed_mask = 0; + const struct cw1200_txpriv *txpriv = NULL; + int count = 0; + + /* More is used only for broadcasts. 
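Backing up to cw1200_get_prio_queue() above: each non-empty queue gets a score whose high bits come from the deterministic EDCA parameters (aifs + cwmin) plus random jitter scaled by the contention-window width, and the lowest score wins, roughly mimicking 802.11 backoff. In isolation (sketch; edca points at one entry of priv->edca.params):

	u32 score = ((edca->aifns + edca->cwmin) << 16) +
		    ((edca->cwmax - edca->cwmin) *
		     (get_random_int() & 0xFFFF));   // lower score wins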
*/ + bool more = false; + + if (priv->wsm_cmd.ptr) { /* CMD request */ + ++count; + spin_lock(&priv->wsm_cmd.lock); + BUG_ON(!priv->wsm_cmd.ptr); + *data = priv->wsm_cmd.ptr; + *tx_len = priv->wsm_cmd.len; + *burst = 1; + spin_unlock(&priv->wsm_cmd.lock); + } else { + for (;;) { + int ret; + + if (atomic_add_return(0, &priv->tx_lock)) + break; + + spin_lock_bh(&priv->ps_state_lock); + + ret = wsm_get_tx_queue_and_mask(priv, &queue, + &tx_allowed_mask, &more); + queue_num = queue - priv->tx_queue; + + if (priv->buffered_multicasts && + (ret || !more) && + (priv->tx_multicast || !priv->sta_asleep_mask)) { + priv->buffered_multicasts = false; + if (priv->tx_multicast) { + priv->tx_multicast = false; + queue_work(priv->workqueue, + &priv->multicast_stop_work); + } + } + + spin_unlock_bh(&priv->ps_state_lock); + + if (ret) + break; + + if (cw1200_queue_get(queue, + tx_allowed_mask, + &wsm, &tx_info, &txpriv)) + continue; + + if (wsm_handle_tx_data(priv, wsm, + tx_info, txpriv, queue)) + continue; /* Handled by WSM */ + + wsm->hdr.id &= __cpu_to_le16( + ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX)); + wsm->hdr.id |= cpu_to_le16( + WSM_TX_LINK_ID(txpriv->raw_link_id)); + priv->pspoll_mask &= ~BIT(txpriv->raw_link_id); + + *data = (u8 *)wsm; + *tx_len = __le16_to_cpu(wsm->hdr.len); + + /* allow bursting if txop is set */ + if (priv->edca.params[queue_num].txop_limit) + *burst = min(*burst, + (int)cw1200_queue_get_num_queued(queue, tx_allowed_mask) + 1); + else + *burst = 1; + + /* store index of bursting queue */ + if (*burst > 1) + priv->tx_burst_idx = queue_num; + else + priv->tx_burst_idx = -1; + + if (more) { + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) + &((u8 *)wsm)[txpriv->offset]; + /* more buffered multicast/broadcast frames + * ==> set MoreData flag in IEEE 802.11 header + * to inform PS STAs + */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } + + pr_debug("[WSM] >>> 0x%.4X (%zu) %p %c\n", + 0x0004, *tx_len, *data, + wsm->more ? 'M' : ' '); + ++count; + break; + } + } + + return count; +} + +void wsm_txed(struct cw1200_common *priv, u8 *data) +{ + if (data == priv->wsm_cmd.ptr) { + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.ptr = NULL; + spin_unlock(&priv->wsm_cmd.lock); + } +} + +/* ******************************************************************** */ +/* WSM buffer */ + +void wsm_buf_init(struct wsm_buf *buf) +{ + BUG_ON(buf->begin); + buf->begin = kmalloc(FWLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); + buf->end = buf->begin ? 
&buf->begin[FWLOAD_BLOCK_SIZE] : buf->begin; + wsm_buf_reset(buf); +} + +void wsm_buf_deinit(struct wsm_buf *buf) +{ + kfree(buf->begin); + buf->begin = buf->data = buf->end = NULL; +} + +static void wsm_buf_reset(struct wsm_buf *buf) +{ + if (buf->begin) { + buf->data = &buf->begin[4]; + *(u32 *)buf->begin = 0; + } else { + buf->data = buf->begin; + } +} + +static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size) +{ + size_t pos = buf->data - buf->begin; + size_t size = pos + extra_size; + u8 *tmp; + + size = round_up(size, FWLOAD_BLOCK_SIZE); + + tmp = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA); + if (!tmp) { + /* Do not leak the old buffer when krealloc() fails. */ + kfree(buf->begin); + buf->end = buf->data = buf->begin = NULL; + return -ENOMEM; + } + buf->begin = tmp; + buf->data = &buf->begin[pos]; + buf->end = &buf->begin[size]; + return 0; +} diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/cw1200/wsm.h new file mode 100644 index 000000000000..2816171f7a1d --- /dev/null +++ b/drivers/net/wireless/cw1200/wsm.h @@ -0,0 +1,1873 @@ +/* + * WSM host interface (HI) interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on CW1200 UMAC WSM API, which is + * Copyright (C) ST-Ericsson SA 2010 + * Author: Stewart Mathers <stewart.mathers@stericsson.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_WSM_H_INCLUDED +#define CW1200_WSM_H_INCLUDED + +#include <linux/spinlock.h> + +struct cw1200_common; + +/* Bands */ +/* Radio band 2.412 -2.484 GHz. */ +#define WSM_PHY_BAND_2_4G (0) + +/* Radio band 4.9375-5.8250 GHz. */ +#define WSM_PHY_BAND_5G (1) + +/* Transmit rates */ +/* 1 Mbps ERP-DSSS */ +#define WSM_TRANSMIT_RATE_1 (0) + +/* 2 Mbps ERP-DSSS */ +#define WSM_TRANSMIT_RATE_2 (1) + +/* 5.5 Mbps ERP-CCK */ +#define WSM_TRANSMIT_RATE_5 (2) + +/* 11 Mbps ERP-CCK */ +#define WSM_TRANSMIT_RATE_11 (3) + +/* 22 Mbps ERP-PBCC (Not supported) */ +/* #define WSM_TRANSMIT_RATE_22 (4) */ + +/* 33 Mbps ERP-PBCC (Not supported) */ +/* #define WSM_TRANSMIT_RATE_33 (5) */ + +/* 6 Mbps (3 Mbps) ERP-OFDM, BPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_6 (6) + +/* 9 Mbps (4.5 Mbps) ERP-OFDM, BPSK coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_9 (7) + +/* 12 Mbps (6 Mbps) ERP-OFDM, QPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_12 (8) + +/* 18 Mbps (9 Mbps) ERP-OFDM, QPSK coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_18 (9) + +/* 24 Mbps (12 Mbps) ERP-OFDM, 16QAM coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_24 (10) + +/* 36 Mbps (18 Mbps) ERP-OFDM, 16QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_36 (11) + +/* 48 Mbps (24 Mbps) ERP-OFDM, 64QAM coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_48 (12) + +/* 54 Mbps (27 Mbps) ERP-OFDM, 64QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_54 (13) + +/* 6.5 Mbps HT-OFDM, BPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_HT_6 (14) + +/* 13 Mbps HT-OFDM, QPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_HT_13 (15) + +/* 19.5 Mbps HT-OFDM, QPSK coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_HT_19 (16) + +/* 26 Mbps HT-OFDM, 16QAM coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_HT_26 (17) + +/* 39 Mbps HT-OFDM, 16QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_HT_39 (18) + +/* 52 Mbps HT-OFDM, 64QAM coding rate 2/3 */ +#define WSM_TRANSMIT_RATE_HT_52 (19) + +/* 58.5 Mbps HT-OFDM, 64QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_HT_58 (20)
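A note on the wsm_buf helpers that closed out wsm.c above: the WSM_PUT8/16/32 and WSM_GET* marshalling macros used throughout that file are defined near its top, outside this excerpt. Presumably they follow this shape, which is why every serializer carries a nomem: (and every parser an underflow:) label. A sketch under that assumption, not the verbatim macro:

	#define WSM_PUT(buf, ptr, size)					\
	do {								\
		if ((buf)->data + (size) > (buf)->end)			\
			if (wsm_buf_reserve((buf), (size)))		\
				goto nomem;				\
		memcpy((buf)->data, (ptr), (size));			\
		(buf)->data += (size);					\
	} while (0)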
+ +/* 65 Mbps HT-OFDM, 64QAM coding rate 5/6 */ +#define WSM_TRANSMIT_RATE_HT_65 (21) + +/* Scan types */ +/* Foreground scan */ +#define WSM_SCAN_TYPE_FOREGROUND (0) + +/* Background scan */ +#define WSM_SCAN_TYPE_BACKGROUND (1) + +/* Auto scan */ +#define WSM_SCAN_TYPE_AUTO (2) + +/* Scan flags */ +/* Forced background scan means if the station cannot */ +/* enter the power-save mode, it shall force to perform a */ +/* background scan. Only valid when ScanType is */ +/* background scan. */ +#define WSM_SCAN_FLAG_FORCE_BACKGROUND (BIT(0)) + +/* The WLAN device scans one channel at a time so */ +/* that disturbance to the data traffic is minimized. */ +#define WSM_SCAN_FLAG_SPLIT_METHOD (BIT(1)) + +/* Preamble Type. Long if not set. */ +#define WSM_SCAN_FLAG_SHORT_PREAMBLE (BIT(2)) + +/* 11n Tx Mode. Mixed if not set. */ +#define WSM_SCAN_FLAG_11N_GREENFIELD (BIT(3)) + +/* Scan constraints */ +/* Maximum number of channels to be scanned. */ +#define WSM_SCAN_MAX_NUM_OF_CHANNELS (48) + +/* The maximum number of SSIDs that the device can scan for. */ +#define WSM_SCAN_MAX_NUM_OF_SSIDS (2) + +/* Power management modes */ +/* 802.11 Active mode */ +#define WSM_PSM_ACTIVE (0) + +/* 802.11 PS mode */ +#define WSM_PSM_PS BIT(0) + +/* Fast Power Save bit */ +#define WSM_PSM_FAST_PS_FLAG BIT(7) + +/* Dynamic aka Fast power save */ +#define WSM_PSM_FAST_PS (BIT(0) | BIT(7)) + +/* Undetermined */ +/* Note : Undetermined status is reported when the */ +/* NULL data frame used to advertise the PM mode to */ +/* the AP at Pre or Post Background Scan is not Acknowledged */ +#define WSM_PSM_UNKNOWN BIT(1) + +/* Queue IDs */ +/* best effort/legacy */ +#define WSM_QUEUE_BEST_EFFORT (0) + +/* background */ +#define WSM_QUEUE_BACKGROUND (1) + +/* video */ +#define WSM_QUEUE_VIDEO (2) + +/* voice */ +#define WSM_QUEUE_VOICE (3) + +/* HT TX parameters */ +/* Non-HT */ +#define WSM_HT_TX_NON_HT (0) + +/* Mixed format */ +#define WSM_HT_TX_MIXED (1) + +/* Greenfield format */ +#define WSM_HT_TX_GREENFIELD (2) + +/* STBC allowed */ +#define WSM_HT_TX_STBC (BIT(7)) + +/* EPTA priority flags for BT Coex */ +/* default epta priority */ +#define WSM_EPTA_PRIORITY_DEFAULT 4 +/* use for normal data */ +#define WSM_EPTA_PRIORITY_DATA 4 +/* use for connect/disconnect/roaming*/ +#define WSM_EPTA_PRIORITY_MGT 5 +/* use for action frames */ +#define WSM_EPTA_PRIORITY_ACTION 5 +/* use for AC_VI data */ +#define WSM_EPTA_PRIORITY_VIDEO 5 +/* use for AC_VO data */ +#define WSM_EPTA_PRIORITY_VOICE 6 +/* use for EAPOL exchange */ +#define WSM_EPTA_PRIORITY_EAPOL 7 + +/* TX status */ +/* Frame was sent aggregated */ +/* Only valid for WSM_SUCCESS status. */ +#define WSM_TX_STATUS_AGGREGATION (BIT(0)) + +/* Host should requeue this frame later. */ +/* Valid only when status is WSM_REQUEUE. */ +#define WSM_TX_STATUS_REQUEUE (BIT(1)) + +/* Normal Ack */ +#define WSM_TX_STATUS_NORMAL_ACK (0<<2) + +/* No Ack */ +#define WSM_TX_STATUS_NO_ACK (1<<2) + +/* No explicit acknowledgement */ +#define WSM_TX_STATUS_NO_EXPLICIT_ACK (2<<2) + +/* Block Ack */ +/* Only valid for WSM_SUCCESS status. */ +#define WSM_TX_STATUS_BLOCK_ACK (3<<2) + +/* RX status */ +/* Unencrypted */ +#define WSM_RX_STATUS_UNENCRYPTED (0<<0) + +/* WEP */ +#define WSM_RX_STATUS_WEP (1<<0) + +/* TKIP */ +#define WSM_RX_STATUS_TKIP (2<<0) + +/* AES */ +#define WSM_RX_STATUS_AES (3<<0) + +/* WAPI */ +#define WSM_RX_STATUS_WAPI (4<<0) + +/* Macro to fetch encryption subfield.
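For example, a receive handler can classify a frame's protection and key slot from this status word using the helpers defined here and just below (sketch; rx points at a struct wsm_rx):

	u32 flags = __le32_to_cpu(rx->flags);
	u32 enc = WSM_RX_STATUS_ENCRYPTION(flags);    // one of the values above
	int keyidx = WSM_RX_STATUS_KEY_IDX(flags);    // defined further below
	bool group = flags & WSM_RX_STATUS_GROUP_KEY;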
*/ +#define WSM_RX_STATUS_ENCRYPTION(status) ((status) & 0x07) + +/* Frame was part of an aggregation */ +#define WSM_RX_STATUS_AGGREGATE (BIT(3)) + +/* Frame was first in the aggregation */ +#define WSM_RX_STATUS_AGGREGATE_FIRST (BIT(4)) + +/* Frame was last in the aggregation */ +#define WSM_RX_STATUS_AGGREGATE_LAST (BIT(5)) + +/* Indicates a defragmented frame */ +#define WSM_RX_STATUS_DEFRAGMENTED (BIT(6)) + +/* Indicates a Beacon frame */ +#define WSM_RX_STATUS_BEACON (BIT(7)) + +/* Indicates STA bit beacon TIM field */ +#define WSM_RX_STATUS_TIM (BIT(8)) + +/* Indicates Beacon frame's virtual bitmap contains multicast bit */ +#define WSM_RX_STATUS_MULTICAST (BIT(9)) + +/* Indicates frame contains a matching SSID */ +#define WSM_RX_STATUS_MATCHING_SSID (BIT(10)) + +/* Indicates frame contains a matching BSSI */ +#define WSM_RX_STATUS_MATCHING_BSSI (BIT(11)) + +/* Indicates More bit set in Framectl field */ +#define WSM_RX_STATUS_MORE_DATA (BIT(12)) + +/* Indicates frame received during a measurement process */ +#define WSM_RX_STATUS_MEASUREMENT (BIT(13)) + +/* Indicates frame received as an HT packet */ +#define WSM_RX_STATUS_HT (BIT(14)) + +/* Indicates frame received with STBC */ +#define WSM_RX_STATUS_STBC (BIT(15)) + +/* Indicates Address 1 field matches dot11StationId */ +#define WSM_RX_STATUS_ADDRESS1 (BIT(16)) + +/* Indicates Group address present in the Address 1 field */ +#define WSM_RX_STATUS_GROUP (BIT(17)) + +/* Indicates Broadcast address present in the Address 1 field */ +#define WSM_RX_STATUS_BROADCAST (BIT(18)) + +/* Indicates group key used with encrypted frames */ +#define WSM_RX_STATUS_GROUP_KEY (BIT(19)) + +/* Macro to fetch encryption key index. */ +#define WSM_RX_STATUS_KEY_IDX(status) (((status >> 20)) & 0x0F) + +/* Indicates TSF inclusion after 802.11 frame body */ +#define WSM_RX_STATUS_TSF_INCLUDED (BIT(24)) + +/* Frame Control field starts at Frame offset + 2 */ +#define WSM_TX_2BYTES_SHIFT (BIT(7)) + +/* Join mode */ +/* IBSS */ +#define WSM_JOIN_MODE_IBSS (0) + +/* BSS */ +#define WSM_JOIN_MODE_BSS (1) + +/* PLCP preamble type */ +/* For long preamble */ +#define WSM_JOIN_PREAMBLE_LONG (0) + +/* For short preamble (Long for 1Mbps) */ +#define WSM_JOIN_PREAMBLE_SHORT (1) + +/* For short preamble (Long for 1 and 2Mbps) */ +#define WSM_JOIN_PREAMBLE_SHORT_2 (2) + +/* Join flags */ +/* Unsynchronized */ +#define WSM_JOIN_FLAGS_UNSYNCRONIZED BIT(0) +/* The BSS owner is a P2P GO */ +#define WSM_JOIN_FLAGS_P2P_GO BIT(1) +/* Force to join BSS with the BSSID and the + * SSID specified without waiting for beacons. The + * ProbeForJoin parameter is ignored. 
+ */ +#define WSM_JOIN_FLAGS_FORCE BIT(2) +/* Give probe request/response higher + * priority over the BT traffic + */ +#define WSM_JOIN_FLAGS_PRIO BIT(3) +/* Issue immediate join confirmation and use + * join complete to notify about completion + */ +#define WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND BIT(5) + +/* Key types */ +#define WSM_KEY_TYPE_WEP_DEFAULT (0) +#define WSM_KEY_TYPE_WEP_PAIRWISE (1) +#define WSM_KEY_TYPE_TKIP_GROUP (2) +#define WSM_KEY_TYPE_TKIP_PAIRWISE (3) +#define WSM_KEY_TYPE_AES_GROUP (4) +#define WSM_KEY_TYPE_AES_PAIRWISE (5) +#define WSM_KEY_TYPE_WAPI_GROUP (6) +#define WSM_KEY_TYPE_WAPI_PAIRWISE (7) + +/* Key indexes */ +#define WSM_KEY_MAX_INDEX (10) + +/* ACK policy */ +#define WSM_ACK_POLICY_NORMAL (0) +#define WSM_ACK_POLICY_NO_ACK (1) + +/* Start modes */ +#define WSM_START_MODE_AP (0) /* Mini AP */ +#define WSM_START_MODE_P2P_GO (1) /* P2P GO */ +#define WSM_START_MODE_P2P_DEV (2) /* P2P device */ + +/* SetAssociationMode MIB flags */ +#define WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE (BIT(0)) +#define WSM_ASSOCIATION_MODE_USE_HT_MODE (BIT(1)) +#define WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET (BIT(2)) +#define WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING (BIT(3)) +#define WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES (BIT(4)) + +/* RcpiRssiThreshold MIB flags */ +#define WSM_RCPI_RSSI_THRESHOLD_ENABLE (BIT(0)) +#define WSM_RCPI_RSSI_USE_RSSI (BIT(1)) +#define WSM_RCPI_RSSI_DONT_USE_UPPER (BIT(2)) +#define WSM_RCPI_RSSI_DONT_USE_LOWER (BIT(3)) + +/* Update-ie constants */ +#define WSM_UPDATE_IE_BEACON (BIT(0)) +#define WSM_UPDATE_IE_PROBE_RESP (BIT(1)) +#define WSM_UPDATE_IE_PROBE_REQ (BIT(2)) + +/* WSM events */ +/* Error */ +#define WSM_EVENT_ERROR (0) + +/* BSS lost */ +#define WSM_EVENT_BSS_LOST (1) + +/* BSS regained */ +#define WSM_EVENT_BSS_REGAINED (2) + +/* Radar detected */ +#define WSM_EVENT_RADAR_DETECTED (3) + +/* RCPI or RSSI threshold triggered */ +#define WSM_EVENT_RCPI_RSSI (4) + +/* BT inactive */ +#define WSM_EVENT_BT_INACTIVE (5) + +/* BT active */ +#define WSM_EVENT_BT_ACTIVE (6) + +/* MIB IDs */ +/* 4.1 dot11StationId */ +#define WSM_MIB_ID_DOT11_STATION_ID 0x0000 + +/* 4.2 dot11MaxtransmitMsduLifeTime */ +#define WSM_MIB_ID_DOT11_MAX_TRANSMIT_LIFTIME 0x0001 + +/* 4.3 dot11MaxReceiveLifeTime */ +#define WSM_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME 0x0002 + +/* 4.4 dot11SlotTime */ +#define WSM_MIB_ID_DOT11_SLOT_TIME 0x0003 + +/* 4.5 dot11GroupAddressesTable */ +#define WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE 0x0004 +#define WSM_MAX_GRP_ADDRTABLE_ENTRIES 8 + +/* 4.6 dot11WepDefaultKeyId */ +#define WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID 0x0005 + +/* 4.7 dot11CurrentTxPowerLevel */ +#define WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL 0x0006 + +/* 4.8 dot11RTSThreshold */ +#define WSM_MIB_ID_DOT11_RTS_THRESHOLD 0x0007 + +/* 4.9 NonErpProtection */ +#define WSM_MIB_ID_NON_ERP_PROTECTION 0x1000 + +/* 4.10 ArpIpAddressesTable */ +#define WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE 0x1001 +#define WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES 1 + +/* 4.11 TemplateFrame */ +#define WSM_MIB_ID_TEMPLATE_FRAME 0x1002 + +/* 4.12 RxFilter */ +#define WSM_MIB_ID_RX_FILTER 0x1003 + +/* 4.13 BeaconFilterTable */ +#define WSM_MIB_ID_BEACON_FILTER_TABLE 0x1004 + +/* 4.14 BeaconFilterEnable */ +#define WSM_MIB_ID_BEACON_FILTER_ENABLE 0x1005 + +/* 4.15 OperationalPowerMode */ +#define WSM_MIB_ID_OPERATIONAL_POWER_MODE 0x1006 + +/* 4.16 BeaconWakeUpPeriod */ +#define WSM_MIB_ID_BEACON_WAKEUP_PERIOD 0x1007 + +/* 4.17 RcpiRssiThreshold */ +#define WSM_MIB_ID_RCPI_RSSI_THRESHOLD 0x1009 + +/* 4.18 
StatisticsTable */ +#define WSM_MIB_ID_STATISTICS_TABLE 0x100A + +/* 4.19 IbssPsConfig */ +#define WSM_MIB_ID_IBSS_PS_CONFIG 0x100B + +/* 4.20 CountersTable */ +#define WSM_MIB_ID_COUNTERS_TABLE 0x100C + +/* 4.21 BlockAckPolicy */ +#define WSM_MIB_ID_BLOCK_ACK_POLICY 0x100E + +/* 4.22 OverrideInternalTxRate */ +#define WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE 0x100F + +/* 4.23 SetAssociationMode */ +#define WSM_MIB_ID_SET_ASSOCIATION_MODE 0x1010 + +/* 4.24 UpdateEptaConfigData */ +#define WSM_MIB_ID_UPDATE_EPTA_CONFIG_DATA 0x1011 + +/* 4.25 SelectCcaMethod */ +#define WSM_MIB_ID_SELECT_CCA_METHOD 0x1012 + +/* 4.26 SetUpasdInformation */ +#define WSM_MIB_ID_SET_UAPSD_INFORMATION 0x1013 + +/* 4.27 SetAutoCalibrationMode WBF00004073 */ +#define WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE 0x1015 + +/* 4.28 SetTxRateRetryPolicy */ +#define WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY 0x1016 + +/* 4.29 SetHostMessageTypeFilter */ +#define WSM_MIB_ID_SET_HOST_MSG_TYPE_FILTER 0x1017 + +/* 4.30 P2PFindInfo */ +#define WSM_MIB_ID_P2P_FIND_INFO 0x1018 + +/* 4.31 P2PPsModeInfo */ +#define WSM_MIB_ID_P2P_PS_MODE_INFO 0x1019 + +/* 4.32 SetEtherTypeDataFrameFilter */ +#define WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER 0x101A + +/* 4.33 SetUDPPortDataFrameFilter */ +#define WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER 0x101B + +/* 4.34 SetMagicDataFrameFilter */ +#define WSM_MIB_ID_SET_MAGIC_DATAFRAME_FILTER 0x101C + +/* 4.35 P2PDeviceInfo */ +#define WSM_MIB_ID_P2P_DEVICE_INFO 0x101D + +/* 4.36 SetWCDMABand */ +#define WSM_MIB_ID_SET_WCDMA_BAND 0x101E + +/* 4.37 GroupTxSequenceCounter */ +#define WSM_MIB_ID_GRP_SEQ_COUNTER 0x101F + +/* 4.38 ProtectedMgmtPolicy */ +#define WSM_MIB_ID_PROTECTED_MGMT_POLICY 0x1020 + +/* 4.39 SetHtProtection */ +#define WSM_MIB_ID_SET_HT_PROTECTION 0x1021 + +/* 4.40 GPIO Command */ +#define WSM_MIB_ID_GPIO_COMMAND 0x1022 + +/* 4.41 TSF Counter Value */ +#define WSM_MIB_ID_TSF_COUNTER 0x1023 + +/* Test Purposes Only */ +#define WSM_MIB_ID_BLOCK_ACK_INFO 0x100D + +/* 4.42 UseMultiTxConfMessage */ +#define WSM_MIB_USE_MULTI_TX_CONF 0x1024 + +/* 4.43 Keep-alive period */ +#define WSM_MIB_ID_KEEP_ALIVE_PERIOD 0x1025 + +/* 4.44 Disable BSSID filter */ +#define WSM_MIB_ID_DISABLE_BSSID_FILTER 0x1026 + +/* Frame template types */ +#define WSM_FRAME_TYPE_PROBE_REQUEST (0) +#define WSM_FRAME_TYPE_BEACON (1) +#define WSM_FRAME_TYPE_NULL (2) +#define WSM_FRAME_TYPE_QOS_NULL (3) +#define WSM_FRAME_TYPE_PS_POLL (4) +#define WSM_FRAME_TYPE_PROBE_RESPONSE (5) + +#define WSM_FRAME_GREENFIELD (0x80) /* See 4.11 */ + +/* Status */ +/* The WSM firmware has completed a request */ +/* successfully. */ +#define WSM_STATUS_SUCCESS (0) + +/* This is a generic failure code if other error codes do */ +/* not apply. */ +#define WSM_STATUS_FAILURE (1) + +/* A request contains one or more invalid parameters. */ +#define WSM_INVALID_PARAMETER (2) + +/* The request cannot perform because the device is in */ +/* an inappropriate mode. */ +#define WSM_ACCESS_DENIED (3) + +/* The frame received includes a decryption error. */ +#define WSM_STATUS_DECRYPTFAILURE (4) + +/* A MIC failure is detected in the received packets. */ +#define WSM_STATUS_MICFAILURE (5) + +/* The transmit request failed due to retry limit being */ +/* exceeded. */ +#define WSM_STATUS_RETRY_EXCEEDED (6) + +/* The transmit request failed due to MSDU life time */ +/* being exceeded. */ +#define WSM_STATUS_TX_LIFETIME_EXCEEDED (7) + +/* The link to the AP is lost. 
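Consumers typically reduce these device status codes to errno-style results before handing them up the stack; a hypothetical mapping helper, not part of the driver:

	static int wsm_status_to_errno(u32 status)
	{
		switch (status) {
		case WSM_STATUS_SUCCESS:	return 0;
		case WSM_INVALID_PARAMETER:	return -EINVAL;
		case WSM_ACCESS_DENIED:		return -EPERM;
		case WSM_REQUEUE:		return -EAGAIN;  // TX only
		default:			return -EIO;
		}
	}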
*/ +#define WSM_STATUS_LINK_LOST (8) + +/* No key was found for the encrypted frame */ +#define WSM_STATUS_NO_KEY_FOUND (9) + +/* Jammer was detected when transmitting this frame */ +#define WSM_STATUS_JAMMER_DETECTED (10) + +/* The message should be requeued later. */ +/* This is applicable only to Transmit */ +#define WSM_REQUEUE (11) + +/* Advanced filtering options */ +#define WSM_MAX_FILTER_ELEMENTS (4) + +#define WSM_FILTER_ACTION_IGNORE (0) +#define WSM_FILTER_ACTION_FILTER_IN (1) +#define WSM_FILTER_ACTION_FILTER_OUT (2) + +#define WSM_FILTER_PORT_TYPE_DST (0) +#define WSM_FILTER_PORT_TYPE_SRC (1) + +/* Actual header of WSM messages */ +struct wsm_hdr { + __le16 len; + __le16 id; +}; + +#define WSM_TX_SEQ_MAX (7) +#define WSM_TX_SEQ(seq) \ + ((seq & WSM_TX_SEQ_MAX) << 13) +#define WSM_TX_LINK_ID_MAX (0x0F) +#define WSM_TX_LINK_ID(link_id) \ + ((link_id & WSM_TX_LINK_ID_MAX) << 6) + +#define MAX_BEACON_SKIP_TIME_MS 1000 + +#define WSM_CMD_LAST_CHANCE_TIMEOUT (HZ * 3 / 2) + +/* ******************************************************************** */ +/* WSM capability */ + +#define WSM_STARTUP_IND_ID 0x0801 + +struct wsm_startup_ind { + u16 input_buffers; + u16 input_buffer_size; + u16 status; + u16 hw_id; + u16 hw_subid; + u16 fw_cap; + u16 fw_type; + u16 fw_api; + u16 fw_build; + u16 fw_ver; + char fw_label[128]; + u32 config[4]; +}; + +/* ******************************************************************** */ +/* WSM commands */ + +/* 3.1 */ +#define WSM_CONFIGURATION_REQ_ID 0x0009 +#define WSM_CONFIGURATION_RESP_ID 0x0409 + +struct wsm_tx_power_range { + int min_power_level; + int max_power_level; + u32 stepping; +}; + +struct wsm_configuration { + /* [in] */ u32 dot11MaxTransmitMsduLifeTime; + /* [in] */ u32 dot11MaxReceiveLifeTime; + /* [in] */ u32 dot11RtsThreshold; + /* [in, out] */ u8 *dot11StationId; + /* [in] */ const void *dpdData; + /* [in] */ size_t dpdData_size; + /* [out] */ u8 dot11FrequencyBandsSupported; + /* [out] */ u32 supportedRateMask; + /* [out] */ struct wsm_tx_power_range txPowerRange[2]; +}; + +int wsm_configuration(struct cw1200_common *priv, + struct wsm_configuration *arg); + +/* 3.3 */ +#define WSM_RESET_REQ_ID 0x000A +#define WSM_RESET_RESP_ID 0x040A +struct wsm_reset { + /* [in] */ int link_id; + /* [in] */ bool reset_statistics; +}; + +int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg); + +/* 3.5 */ +#define WSM_READ_MIB_REQ_ID 0x0005 +#define WSM_READ_MIB_RESP_ID 0x0405 +int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *buf, + size_t buf_size); + +/* 3.7 */ +#define WSM_WRITE_MIB_REQ_ID 0x0006 +#define WSM_WRITE_MIB_RESP_ID 0x0406 +int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *buf, + size_t buf_size); + +/* 3.9 */ +#define WSM_START_SCAN_REQ_ID 0x0007 +#define WSM_START_SCAN_RESP_ID 0x0407 + +struct wsm_ssid { + u8 ssid[32]; + u32 length; +}; + +struct wsm_scan_ch { + u16 number; + u32 min_chan_time; + u32 max_chan_time; + u32 tx_power_level; +}; + +struct wsm_scan { + /* WSM_PHY_BAND_... */ + u8 band; + + /* WSM_SCAN_TYPE_... */ + u8 type; + + /* WSM_SCAN_FLAG_... */ + u8 flags; + + /* WSM_TRANSMIT_RATE_... */ + u8 max_tx_rate; + + /* Interval period in TUs that the device shall re- */ /* execute the requested scan. Max value supported by the device */ /* is 256s. */ + u32 auto_scan_interval; + + /* Number of probe requests (per SSID) sent to one (1) */ /* channel. Zero (0) means that none is sent, which */ /* means that a passive scan is to be done.
Value */ + /* greater than zero (0) means that an active scan is to */ + /* be done. */ + u32 num_probes; + + /* Number of channels to be scanned. */ + /* Maximum value is WSM_SCAN_MAX_NUM_OF_CHANNELS. */ + u8 num_channels; + + /* Number of SSID provided in the scan command (this */ + /* is zero (0) in broadcast scan) */ + /* The maximum number of SSIDs is WSM_SCAN_MAX_NUM_OF_SSIDS. */ + u8 num_ssids; + + /* The delay time (in microseconds) period */ + /* before sending a probe-request. */ + u8 probe_delay; + + /* SSIDs to be scanned [numOfSSIDs]; */ + struct wsm_ssid *ssids; + + /* Channels to be scanned [numOfChannels]; */ + struct wsm_scan_ch *ch; +}; + +int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg); + +/* 3.11 */ +#define WSM_STOP_SCAN_REQ_ID 0x0008 +#define WSM_STOP_SCAN_RESP_ID 0x0408 +int wsm_stop_scan(struct cw1200_common *priv); + +/* 3.13 */ +#define WSM_SCAN_COMPLETE_IND_ID 0x0806 +struct wsm_scan_complete { + /* WSM_STATUS_... */ + u32 status; + + /* WSM_PSM_... */ + u8 psm; + + /* Number of channels that the scan operation completed. */ + u8 num_channels; +}; + +/* 3.14 */ +#define WSM_TX_CONFIRM_IND_ID 0x0404 +#define WSM_MULTI_TX_CONFIRM_ID 0x041E + +struct wsm_tx_confirm { + /* Packet identifier used in wsm_tx. */ + u32 packet_id; + + /* WSM_STATUS_... */ + u32 status; + + /* WSM_TRANSMIT_RATE_... */ + u8 tx_rate; + + /* The number of times the frame was transmitted */ + /* without receiving an acknowledgement. */ + u8 ack_failures; + + /* WSM_TX_STATUS_... */ + u16 flags; + + /* The total time in microseconds that the frame spent in */ + /* the WLAN device before transmission as completed. */ + u32 media_delay; + + /* The total time in microseconds that the frame spent in */ + /* the WLAN device before transmission was started. */ + u32 tx_queue_delay; +}; + +/* 3.15 */ +typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv, + struct wsm_tx_confirm *arg); + +/* Note that ideology of wsm_tx struct is different against the rest of + * WSM API. wsm_hdr is /not/ a caller-adapted struct to be used as an input + * argument for WSM call, but a prepared bytestream to be sent to firmware. + * It is filled partly in cw1200_tx, partly in low-level WSM code. + * Please pay attention once again: ideology is different. + * + * Legend: + * - [in]: cw1200_tx must fill this field. + * - [wsm]: the field is filled by low-level WSM. + */ +struct wsm_tx { + /* common WSM header */ + struct wsm_hdr hdr; + + /* Packet identifier that meant to be used in completion. */ + __le32 packet_id; + + /* WSM_TRANSMIT_RATE_... */ + u8 max_tx_rate; + + /* WSM_QUEUE_... */ + u8 queue_id; + + /* True: another packet is pending on the host for transmission. */ + u8 more; + + /* Bit 0 = 0 - Start expiry time from first Tx attempt (default) */ + /* Bit 0 = 1 - Start expiry time from receipt of Tx Request */ + /* Bits 3:1 - PTA Priority */ + /* Bits 6:4 - Tx Rate Retry Policy */ + /* Bit 7 - Reserved */ + u8 flags; + + /* Should be 0. */ + __le32 reserved; + + /* The elapsed time in TUs, after the initial transmission */ + /* of an MSDU, after which further attempts to transmit */ + /* the MSDU shall be terminated. Overrides the global */ + /* dot11MaxTransmitMsduLifeTime setting [optional] */ + /* Device will set the default value if this is 0. */ + __le32 expire_time; + + /* WSM_HT_TX_... 
*/ + __le32 ht_tx_parameters; +}; + +/* = sizeof(generic hi hdr) + sizeof(wsm hdr) + sizeof(alignment) */ +#define WSM_TX_EXTRA_HEADROOM (28) + +/* 3.16 */ +#define WSM_RECEIVE_IND_ID 0x0804 + +struct wsm_rx { + /* WSM_STATUS_... */ + __le32 status; + + /* Specifies the channel of the received packet. */ + __le16 channel_number; + + /* WSM_TRANSMIT_RATE_... */ + u8 rx_rate; + + /* This value is expressed in signed Q8.0 format for */ + /* RSSI and unsigned Q7.1 format for RCPI. */ + u8 rcpi_rssi; + + /* WSM_RX_STATUS_... */ + __le32 flags; + + /* Payload */ + u8 data[0]; +} __packed; + +/* = sizeof(generic hi hdr) + sizeof(wsm hdr) */ +#define WSM_RX_EXTRA_HEADROOM (16) + +typedef void (*wsm_rx_cb) (struct cw1200_common *priv, struct wsm_rx *arg, + struct sk_buff **skb_p); + +/* 3.17 */ +struct wsm_event { + /* WSM_STATUS_... */ + /* [out] */ u32 id; + + /* Indication parameters. */ + /* For error indication, this shall be a 32-bit WSM status. */ + /* For RCPI or RSSI indication, this should be an 8-bit */ + /* RCPI or RSSI value. */ + /* [out] */ u32 data; +}; + +struct cw1200_wsm_event { + struct list_head link; + struct wsm_event evt; +}; + +/* 3.18 - 3.22 */ +/* Measurement. Skipped for now. Irrelevant. */ + +typedef void (*wsm_event_cb) (struct cw1200_common *priv, + struct wsm_event *arg); + +/* 3.23 */ +#define WSM_JOIN_REQ_ID 0x000B +#define WSM_JOIN_RESP_ID 0x040B + +struct wsm_join { + /* WSM_JOIN_MODE_... */ + u8 mode; + + /* WSM_PHY_BAND_... */ + u8 band; + + /* Specifies the channel number to join. The channel */ /* number will be mapped to an actual frequency */ /* according to the band */ + u16 channel_number; + + /* Specifies the BSSID of the BSS or IBSS to be joined */ /* or the IBSS to be started. */ + u8 bssid[6]; + + /* ATIM window of IBSS */ /* When ATIM window is zero the initiated IBSS does */ /* not support power saving. */ + u16 atim_window; + + /* WSM_JOIN_PREAMBLE_... */ + u8 preamble_type; + + /* Specifies if a probe request should be sent with the */ /* specified SSID when joining to the network. */ + u8 probe_for_join; + + /* DTIM Period (In multiples of beacon interval) */ + u8 dtim_period; + + /* WSM_JOIN_FLAGS_... */ + u8 flags; + + /* Length of the SSID */ + u32 ssid_len; + + /* Specifies the SSID of the IBSS to join or start */ + u8 ssid[32]; + + /* Specifies the time between TBTTs in TUs */ + u32 beacon_interval; + + /* A bit mask that defines the BSS basic rate set. */ + u32 basic_rate_set; +}; + +struct wsm_join_cnf { + u32 status; + + /* Minimum transmission power level in units of 0.1dBm */ + u32 min_power_level; + + /* Maximum transmission power level in units of 0.1dBm */ + u32 max_power_level; +}; + +int wsm_join(struct cw1200_common *priv, struct wsm_join *arg); + +/* 3.24 */ +struct wsm_join_complete { + /* WSM_STATUS_... */ + u32 status; +}; + +/* 3.25 */ +#define WSM_SET_PM_REQ_ID 0x0010 +#define WSM_SET_PM_RESP_ID 0x0410 +struct wsm_set_pm { + /* WSM_PSM_... */ + u8 mode; + + /* in unit of 500us; 0 to use default */ + u8 fast_psm_idle_period; + + /* in unit of 500us; 0 to use default */ + u8 ap_psm_change_period; + + /* in unit of 500us; 0 to disable auto-pspoll */ + u8 min_auto_pspoll_period; +}; + +int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg); + +/* 3.27 */ +struct wsm_set_pm_complete { + u8 psm; /* WSM_PSM_...
*/ +}; + +/* 3.28 */ +#define WSM_SET_BSS_PARAMS_REQ_ID 0x0011 +#define WSM_SET_BSS_PARAMS_RESP_ID 0x0411 +struct wsm_set_bss_params { + /* This resets the beacon loss counters only */ + u8 reset_beacon_loss; + + /* The number of lost consecutive beacons after which */ + /* the WLAN device should indicate the BSS-Lost event */ + /* to the WLAN host driver. */ + u8 beacon_lost_count; + + /* The AID received during the association process. */ + u16 aid; + + /* The operational rate set mask */ + u32 operational_rate_set; +}; + +int wsm_set_bss_params(struct cw1200_common *priv, + const struct wsm_set_bss_params *arg); + +/* 3.30 */ +#define WSM_ADD_KEY_REQ_ID 0x000C +#define WSM_ADD_KEY_RESP_ID 0x040C +struct wsm_add_key { + u8 type; /* WSM_KEY_TYPE_... */ + u8 index; /* Key entry index: 0 -- WSM_KEY_MAX_INDEX */ + u16 reserved; + union { + struct { + u8 peer[6]; /* MAC address of the peer station */ + u8 reserved; + u8 keylen; /* Key length in bytes */ + u8 keydata[16]; /* Key data */ + } __packed wep_pairwise; + struct { + u8 keyid; /* Unique per key identifier (0..3) */ + u8 keylen; /* Key length in bytes */ + u16 reserved; + u8 keydata[16]; /* Key data */ + } __packed wep_group; + struct { + u8 peer[6]; /* MAC address of the peer station */ + u16 reserved; + u8 keydata[16]; /* TKIP key data */ + u8 rx_mic_key[8]; /* Rx MIC key */ + u8 tx_mic_key[8]; /* Tx MIC key */ + } __packed tkip_pairwise; + struct { + u8 keydata[16]; /* TKIP key data */ + u8 rx_mic_key[8]; /* Rx MIC key */ + u8 keyid; /* Key ID */ + u8 reserved[3]; + u8 rx_seqnum[8]; /* Receive Sequence Counter */ + } __packed tkip_group; + struct { + u8 peer[6]; /* MAC address of the peer station */ + u16 reserved; + u8 keydata[16]; /* AES key data */ + } __packed aes_pairwise; + struct { + u8 keydata[16]; /* AES key data */ + u8 keyid; /* Key ID */ + u8 reserved[3]; + u8 rx_seqnum[8]; /* Receive Sequence Counter */ + } __packed aes_group; + struct { + u8 peer[6]; /* MAC address of the peer station */ + u8 keyid; /* Key ID */ + u8 reserved; + u8 keydata[16]; /* WAPI key data */ + u8 mic_key[16]; /* MIC key data */ + } __packed wapi_pairwise; + struct { + u8 keydata[16]; /* WAPI key data */ + u8 mic_key[16]; /* MIC key data */ + u8 keyid; /* Key ID */ + u8 reserved[3]; + } __packed wapi_group; + } __packed; +} __packed; + +int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg); + +/* 3.32 */ +#define WSM_REMOVE_KEY_REQ_ID 0x000D +#define WSM_REMOVE_KEY_RESP_ID 0x040D +struct wsm_remove_key { + u8 index; /* Key entry index : 0-10 */ +}; + +int wsm_remove_key(struct cw1200_common *priv, + const struct wsm_remove_key *arg); + +/* 3.34 */ +struct wsm_set_tx_queue_params { + /* WSM_ACK_POLICY_... */ + u8 ackPolicy; + + /* Medium Time of TSPEC (in 32us units) allowed per */ + /* One Second Averaging Period for this queue. */ + u16 allowedMediumTime; + + /* dot11MaxTransmitMsduLifetime to be used for the */ + /* specified queue. */ + u32 maxTransmitLifetime; +}; + +struct wsm_tx_queue_params { + /* NOTE: index is a linux queue id. 
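/*
 * [Editorial example - not part of this patch.] A sketch of installing a
 * pairwise AES (CCMP) key through wsm_add_key(). The constant name
 * WSM_KEY_TYPE_AES_PAIRWISE, the key index and the 6-byte peer address
 * are assumptions for illustration; only the struct layout above is
 * taken from the patch.
 */
#include <linux/string.h>

static int example_add_aes_pairwise_key(struct cw1200_common *priv,
					const u8 *peer, const u8 *key)
{
	struct wsm_add_key cmd = {
		.type = WSM_KEY_TYPE_AES_PAIRWISE,	/* assumed name */
		.index = 0,			/* 0..WSM_KEY_MAX_INDEX */
	};

	memcpy(cmd.aes_pairwise.peer, peer, 6);
	memcpy(cmd.aes_pairwise.keydata, key, 16);
	return wsm_add_key(priv, &cmd);
}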
*/
+	struct wsm_set_tx_queue_params params[4];
+};
+
+
+#define WSM_TX_QUEUE_SET(queue_params, queue, ack_policy, allowed_time,\
+			 max_life_time)	\
+do {							\
+	struct wsm_set_tx_queue_params *p = &(queue_params)->params[queue]; \
+	p->ackPolicy = (ack_policy);			\
+	p->allowedMediumTime = (allowed_time);		\
+	p->maxTransmitLifetime = (max_life_time);	\
+} while (0)
+
+int wsm_set_tx_queue_params(struct cw1200_common *priv,
+			    const struct wsm_set_tx_queue_params *arg, u8 id);
+
+/* 3.36 */
+#define WSM_EDCA_PARAMS_REQ_ID 0x0013
+#define WSM_EDCA_PARAMS_RESP_ID 0x0413
+struct wsm_edca_queue_params {
+	/* CWmin (in slots) for the access class. */
+	__le16 cwmin;
+
+	/* CWmax (in slots) for the access class. */
+	__le16 cwmax;
+
+	/* AIFS (in slots) for the access class. */
+	__le16 aifns;
+
+	/* TX OP Limit (in microseconds) for the access class. */
+	__le16 txop_limit;
+
+	/* dot11MaxReceiveLifetime to be used for the specified */
+	/* access class. Overrides the global */
+	/* dot11MaxReceiveLifetime value */
+	__le32 max_rx_lifetime;
+} __packed;
+
+struct wsm_edca_params {
+	/* NOTE: index is a linux queue id. */
+	struct wsm_edca_queue_params params[4];
+	bool uapsd_enable[4];
+};
+
+#define TXOP_UNIT 32
+#define WSM_EDCA_SET(__edca, __queue, __aifs, __cw_min, __cw_max, __txop, __lifetime,\
+		     __uapsd) \
+	do {							\
+		struct wsm_edca_queue_params *p = &(__edca)->params[__queue]; \
+		p->cwmin = (__cw_min);				\
+		p->cwmax = (__cw_max);				\
+		p->aifns = (__aifs);				\
+		p->txop_limit = ((__txop) * TXOP_UNIT);		\
+		p->max_rx_lifetime = (__lifetime);		\
+		(__edca)->uapsd_enable[__queue] = (__uapsd);	\
+	} while (0)
+
+int wsm_set_edca_params(struct cw1200_common *priv,
+			const struct wsm_edca_params *arg);
+
+int wsm_set_uapsd_param(struct cw1200_common *priv,
+			const struct wsm_edca_params *arg);
+
+/* 3.38 */
+/* Set-System info. Skipped for now. Irrelevant. */
+
+/* 3.40 */
+#define WSM_SWITCH_CHANNEL_REQ_ID 0x0016
+#define WSM_SWITCH_CHANNEL_RESP_ID 0x0416
+
+struct wsm_switch_channel {
+	/* 1 - means the STA shall not transmit any further */
+	/* frames until the channel switch has completed */
+	u8 mode;
+
+	/* Number of TBTTs until channel switch occurs. */
+	/* 0 - indicates switch shall occur at any time */
+	/* 1 - occurs immediately before the next TBTT */
+	u8 switch_count;
+
+	/* The new channel number to switch to. */
+	/* Note this is defined as per section 2.7. */
+	u16 channel_number;
+};
+
+int wsm_switch_channel(struct cw1200_common *priv,
+		       const struct wsm_switch_channel *arg);
+
+typedef void (*wsm_channel_switch_cb) (struct cw1200_common *priv);
+
+#define WSM_START_REQ_ID 0x0017
+#define WSM_START_RESP_ID 0x0417
+
+struct wsm_start {
+	/* WSM_START_MODE_... */
+	/* [in] */ u8 mode;
+
+	/* WSM_PHY_BAND_... */
+	/* [in] */ u8 band;
+
+	/* Channel number */
+	/* [in] */ u16 channel_number;
+
+	/* Client Traffic window in units of TU */
+	/* Valid only when mode == ..._P2P */
+	/* [in] */ u32 ct_window;
+
+	/* Interval between two consecutive */
+	/* beacon transmissions in TU. */
+	/* [in] */ u32 beacon_interval;
+
+	/* DTIM period in terms of beacon intervals */
+	/* [in] */ u8 dtim_period;
+
+	/* WSM_JOIN_PREAMBLE_... */
+	/* [in] */ u8 preamble;
+
+	/* The delay time (in microseconds) */
+	/* before sending a probe-request. */
+	/* [in] */ u8 probe_delay;
+
+	/* Length of the SSID */
+	/* [in] */ u8 ssid_len;
+
+	/* SSID of the BSS or P2P_GO to be started now. */
+	/* [in] */ u8 ssid[32];
+
+	/* The basic supported rates for the MiniAP.
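/*
 * [Editorial example - not part of this patch.] Filling one EDCA entry per
 * Linux AC with the WSM_EDCA_SET() helper above. The AIFS/CW/TXOP/lifetime
 * numbers are placeholders, not the values the driver actually programs.
 */
static void example_fill_edca(struct wsm_edca_params *edca)
{
	/*           queue aifs cwmin cwmax txop lifetime uapsd */
	WSM_EDCA_SET(edca, 0,   2,    3,    7,  47, 0xc8, false);
	WSM_EDCA_SET(edca, 1,   2,    7,   15,  94, 0xc8, false);
	WSM_EDCA_SET(edca, 2,   3,   15, 1023,   0, 0xc8, false);
	WSM_EDCA_SET(edca, 3,   7,   15, 1023,   0, 0xc8, false);
}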
*/
+	/* [in] */ u32 basic_rate_set;
+};
+
+int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg);
+
+#define WSM_BEACON_TRANSMIT_REQ_ID 0x0018
+#define WSM_BEACON_TRANSMIT_RESP_ID 0x0418
+
+struct wsm_beacon_transmit {
+	/* 1: enable; 0: disable */
+	/* [in] */ u8 enable_beaconing;
+};
+
+int wsm_beacon_transmit(struct cw1200_common *priv,
+			const struct wsm_beacon_transmit *arg);
+
+int wsm_start_find(struct cw1200_common *priv);
+
+int wsm_stop_find(struct cw1200_common *priv);
+
+typedef void (*wsm_find_complete_cb) (struct cw1200_common *priv, u32 status);
+
+struct wsm_suspend_resume {
+	/* See 3.52 */
+	/* Link ID */
+	/* [out] */ int link_id;
+	/* Stop sending further Tx requests down to device for this link */
+	/* [out] */ bool stop;
+	/* Transmit multicast Frames */
+	/* [out] */ bool multicast;
+	/* The AC on which Tx is to be suspended/resumed. */
+	/* This is applicable only for U-APSD */
+	/* WSM_QUEUE_... */
+	/* [out] */ int queue;
+};
+
+typedef void (*wsm_suspend_resume_cb) (struct cw1200_common *priv,
+				       struct wsm_suspend_resume *arg);
+
+/* 3.54 Update-IE request. */
+struct wsm_update_ie {
+	/* WSM_UPDATE_IE_... */
+	/* [in] */ u16 what;
+	/* [in] */ u16 count;
+	/* [in] */ u8 *ies;
+	/* [in] */ size_t length;
+};
+
+int wsm_update_ie(struct cw1200_common *priv,
+		  const struct wsm_update_ie *arg);
+
+/* 3.56 */
+struct wsm_map_link {
+	/* MAC address of the remote device */
+	/* [in] */ u8 mac_addr[6];
+	/* [in] */ u8 link_id;
+};
+
+int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg);
+
+/* ******************************************************************** */
+/* MIB shortcuts */
+
+static inline int wsm_set_output_power(struct cw1200_common *priv,
+				       int power_level)
+{
+	__le32 val = __cpu_to_le32(power_level);
+	return wsm_write_mib(priv, WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL,
+			     &val, sizeof(val));
+}
+
+static inline int wsm_set_beacon_wakeup_period(struct cw1200_common *priv,
+					       unsigned dtim_interval,
+					       unsigned listen_interval)
+{
+	struct {
+		u8 numBeaconPeriods;
+		u8 reserved;
+		__le16 listenInterval;
+	} val = {
+		dtim_interval, 0, __cpu_to_le16(listen_interval)
+	};
+
+	if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
+		return -EINVAL;
+	else
+		return wsm_write_mib(priv, WSM_MIB_ID_BEACON_WAKEUP_PERIOD,
+				     &val, sizeof(val));
+}
+
+struct wsm_rcpi_rssi_threshold {
+	u8 rssiRcpiMode;	/* WSM_RCPI_RSSI_...
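/*
 * [Editorial example - not part of this patch.] Typical use of the MIB
 * shortcut helpers above; the power level scale (0.1 dBm, matching the
 * wsm_join_cnf fields) and the concrete values are assumptions.
 */
static int example_post_assoc_setup(struct cw1200_common *priv,
				    unsigned int dtim_period)
{
	int ret;

	ret = wsm_set_output_power(priv, 200);	/* 20 dBm, assumed scale */
	if (ret)
		return ret;

	/* Wake up for every DTIM beacon; no extra listen interval. */
	return wsm_set_beacon_wakeup_period(priv, dtim_period, 0);
}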
*/ + u8 lowerThreshold; + u8 upperThreshold; + u8 rollingAverageCount; +}; + +static inline int wsm_set_rcpi_rssi_threshold(struct cw1200_common *priv, + struct wsm_rcpi_rssi_threshold *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_RCPI_RSSI_THRESHOLD, arg, + sizeof(*arg)); +} + +struct wsm_mib_counters_table { + __le32 plcp_errors; + __le32 fcs_errors; + __le32 tx_packets; + __le32 rx_packets; + __le32 rx_packet_errors; + __le32 rx_decryption_failures; + __le32 rx_mic_failures; + __le32 rx_no_key_failures; + __le32 tx_multicast_frames; + __le32 tx_frames_success; + __le32 tx_frame_failures; + __le32 tx_frames_retried; + __le32 tx_frames_multi_retried; + __le32 rx_frame_duplicates; + __le32 rts_success; + __le32 rts_failures; + __le32 ack_failures; + __le32 rx_multicast_frames; + __le32 rx_frames_success; + __le32 rx_cmac_icv_errors; + __le32 rx_cmac_replays; + __le32 rx_mgmt_ccmp_replays; +} __packed; + +static inline int wsm_get_counters_table(struct cw1200_common *priv, + struct wsm_mib_counters_table *arg) +{ + return wsm_read_mib(priv, WSM_MIB_ID_COUNTERS_TABLE, + arg, sizeof(*arg)); +} + +static inline int wsm_get_station_id(struct cw1200_common *priv, u8 *mac) +{ + return wsm_read_mib(priv, WSM_MIB_ID_DOT11_STATION_ID, mac, ETH_ALEN); +} + +struct wsm_rx_filter { + bool promiscuous; + bool bssid; + bool fcs; + bool probeResponder; +}; + +static inline int wsm_set_rx_filter(struct cw1200_common *priv, + const struct wsm_rx_filter *arg) +{ + __le32 val = 0; + if (arg->promiscuous) + val |= __cpu_to_le32(BIT(0)); + if (arg->bssid) + val |= __cpu_to_le32(BIT(1)); + if (arg->fcs) + val |= __cpu_to_le32(BIT(2)); + if (arg->probeResponder) + val |= __cpu_to_le32(BIT(3)); + return wsm_write_mib(priv, WSM_MIB_ID_RX_FILTER, &val, sizeof(val)); +} + +int wsm_set_probe_responder(struct cw1200_common *priv, bool enable); + +#define WSM_BEACON_FILTER_IE_HAS_CHANGED BIT(0) +#define WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT BIT(1) +#define WSM_BEACON_FILTER_IE_HAS_APPEARED BIT(2) + +struct wsm_beacon_filter_table_entry { + u8 ie_id; + u8 flags; + u8 oui[3]; + u8 match_data[3]; +} __packed; + +struct wsm_mib_beacon_filter_table { + __le32 num; + struct wsm_beacon_filter_table_entry entry[10]; +} __packed; + +static inline int wsm_set_beacon_filter_table(struct cw1200_common *priv, + struct wsm_mib_beacon_filter_table *ft) +{ + size_t size = __le32_to_cpu(ft->num) * + sizeof(struct wsm_beacon_filter_table_entry) + + sizeof(__le32); + + return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_TABLE, ft, size); +} + +#define WSM_BEACON_FILTER_ENABLE BIT(0) /* Enable/disable beacon filtering */ +#define WSM_BEACON_FILTER_AUTO_ERP BIT(1) /* If 1 FW will handle ERP IE changes internally */ + +struct wsm_beacon_filter_control { + int enabled; + int bcn_count; +}; + +static inline int wsm_beacon_filter_control(struct cw1200_common *priv, + struct wsm_beacon_filter_control *arg) +{ + struct { + __le32 enabled; + __le32 bcn_count; + } val; + val.enabled = __cpu_to_le32(arg->enabled); + val.bcn_count = __cpu_to_le32(arg->bcn_count); + return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_ENABLE, &val, + sizeof(val)); +} + +enum wsm_power_mode { + wsm_power_mode_active = 0, + wsm_power_mode_doze = 1, + wsm_power_mode_quiescent = 2, +}; + +struct wsm_operational_mode { + enum wsm_power_mode power_mode; + int disable_more_flag_usage; + int perform_ant_diversity; +}; + +static inline int wsm_set_operational_mode(struct cw1200_common *priv, + const struct wsm_operational_mode *arg) +{ + u8 val = arg->power_mode; + if 
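/*
 * [Editorial example - not part of this patch.] Enabling BSSID filtering
 * and the probe-responder bit through wsm_set_rx_filter() above; which
 * bits a real caller sets depends on the interface mode.
 */
static int example_configure_rx_filter(struct cw1200_common *priv)
{
	struct wsm_rx_filter filter = {
		.promiscuous = false,
		.bssid = true,		/* BIT(1) of the MIB value */
		.fcs = false,
		.probeResponder = true,	/* BIT(3) of the MIB value */
	};

	return wsm_set_rx_filter(priv, &filter);
}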
(arg->disable_more_flag_usage) + val |= BIT(4); + if (arg->perform_ant_diversity) + val |= BIT(5); + return wsm_write_mib(priv, WSM_MIB_ID_OPERATIONAL_POWER_MODE, &val, + sizeof(val)); +} + +struct wsm_template_frame { + u8 frame_type; + u8 rate; + struct sk_buff *skb; +}; + +static inline int wsm_set_template_frame(struct cw1200_common *priv, + struct wsm_template_frame *arg) +{ + int ret; + u8 *p = skb_push(arg->skb, 4); + p[0] = arg->frame_type; + p[1] = arg->rate; + ((u16 *)p)[1] = __cpu_to_le16(arg->skb->len - 4); + ret = wsm_write_mib(priv, WSM_MIB_ID_TEMPLATE_FRAME, p, arg->skb->len); + skb_pull(arg->skb, 4); + return ret; +} + + +struct wsm_protected_mgmt_policy { + bool protectedMgmtEnable; + bool unprotectedMgmtFramesAllowed; + bool encryptionForAuthFrame; +}; + +static inline int wsm_set_protected_mgmt_policy(struct cw1200_common *priv, + struct wsm_protected_mgmt_policy *arg) +{ + __le32 val = 0; + int ret; + if (arg->protectedMgmtEnable) + val |= __cpu_to_le32(BIT(0)); + if (arg->unprotectedMgmtFramesAllowed) + val |= __cpu_to_le32(BIT(1)); + if (arg->encryptionForAuthFrame) + val |= __cpu_to_le32(BIT(2)); + ret = wsm_write_mib(priv, WSM_MIB_ID_PROTECTED_MGMT_POLICY, + &val, sizeof(val)); + return ret; +} + +struct wsm_mib_block_ack_policy { + u8 tx_tid; + u8 reserved1; + u8 rx_tid; + u8 reserved2; +} __packed; + +static inline int wsm_set_block_ack_policy(struct cw1200_common *priv, + u8 tx_tid_policy, + u8 rx_tid_policy) +{ + struct wsm_mib_block_ack_policy val = { + .tx_tid = tx_tid_policy, + .rx_tid = rx_tid_policy, + }; + return wsm_write_mib(priv, WSM_MIB_ID_BLOCK_ACK_POLICY, &val, + sizeof(val)); +} + +struct wsm_mib_association_mode { + u8 flags; /* WSM_ASSOCIATION_MODE_... */ + u8 preamble; /* WSM_JOIN_PREAMBLE_... */ + u8 greenfield; /* 1 for greenfield */ + u8 mpdu_start_spacing; + __le32 basic_rate_set; +} __packed; + +static inline int wsm_set_association_mode(struct cw1200_common *priv, + struct wsm_mib_association_mode *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_SET_ASSOCIATION_MODE, arg, + sizeof(*arg)); +} + +#define WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED BIT(2) +#define WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT BIT(3) +struct wsm_tx_rate_retry_policy { + u8 index; + u8 short_retries; + u8 long_retries; + /* BIT(2) - Terminate retries when Tx rate retry policy + * finishes. 
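/*
 * [Editorial example - not part of this patch.] Pushing a template frame
 * to the firmware with wsm_set_template_frame() above. The frame type and
 * rate values are hypothetical placeholders; note that the helper pushes a
 * 4-byte header onto the skb and pops it again, so the caller's skb must
 * have at least 4 bytes of headroom.
 */
static int example_set_template(struct cw1200_common *priv,
				struct sk_buff *skb)
{
	struct wsm_template_frame frame = {
		.frame_type = 2,	/* hypothetical: probe request */
		.rate = 0,		/* hypothetical: lowest rate */
		.skb = skb,
	};

	return wsm_set_template_frame(priv, &frame);
}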
+ * BIT(3) - Count initial frame transmission as part of + * rate retry counting but not as a retry + * attempt + */ + u8 flags; + u8 rate_recoveries; + u8 reserved[3]; + __le32 rate_count_indices[3]; +} __packed; + +struct wsm_set_tx_rate_retry_policy { + u8 num; + u8 reserved[3]; + struct wsm_tx_rate_retry_policy tbl[8]; +} __packed; + +static inline int wsm_set_tx_rate_retry_policy(struct cw1200_common *priv, + struct wsm_set_tx_rate_retry_policy *arg) +{ + size_t size = 4 + arg->num * sizeof(struct wsm_tx_rate_retry_policy); + return wsm_write_mib(priv, WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, + size); +} + +/* 4.32 SetEtherTypeDataFrameFilter */ +struct wsm_ether_type_filter_hdr { + u8 num; /* Up to WSM_MAX_FILTER_ELEMENTS */ + u8 reserved[3]; +} __packed; + +struct wsm_ether_type_filter { + u8 action; /* WSM_FILTER_ACTION_XXX */ + u8 reserved; + __le16 type; /* Type of ethernet frame */ +} __packed; + +static inline int wsm_set_ether_type_filter(struct cw1200_common *priv, + struct wsm_ether_type_filter_hdr *arg) +{ + size_t size = sizeof(struct wsm_ether_type_filter_hdr) + + arg->num * sizeof(struct wsm_ether_type_filter); + return wsm_write_mib(priv, WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER, + arg, size); +} + +/* 4.33 SetUDPPortDataFrameFilter */ +struct wsm_udp_port_filter_hdr { + u8 num; /* Up to WSM_MAX_FILTER_ELEMENTS */ + u8 reserved[3]; +} __packed; + +struct wsm_udp_port_filter { + u8 action; /* WSM_FILTER_ACTION_XXX */ + u8 type; /* WSM_FILTER_PORT_TYPE_XXX */ + __le16 port; /* Port number */ +} __packed; + +static inline int wsm_set_udp_port_filter(struct cw1200_common *priv, + struct wsm_udp_port_filter_hdr *arg) +{ + size_t size = sizeof(struct wsm_udp_port_filter_hdr) + + arg->num * sizeof(struct wsm_udp_port_filter); + return wsm_write_mib(priv, WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER, + arg, size); +} + +/* Undocumented MIBs: */ +/* 4.35 P2PDeviceInfo */ +#define D11_MAX_SSID_LEN (32) + +struct wsm_p2p_device_type { + __le16 category_id; + u8 oui[4]; + __le16 subcategory_id; +} __packed; + +struct wsm_p2p_device_info { + struct wsm_p2p_device_type primaryDevice; + u8 reserved1[3]; + u8 devname_size; + u8 local_devname[D11_MAX_SSID_LEN]; + u8 reserved2[3]; + u8 num_secdev_supported; + struct wsm_p2p_device_type secdevs[0]; +} __packed; + +/* 4.36 SetWCDMABand - WO */ +struct wsm_cdma_band { + u8 wcdma_band; + u8 reserved[3]; +} __packed; + +/* 4.37 GroupTxSequenceCounter - RO */ +struct wsm_group_tx_seq { + __le32 bits_47_16; + __le16 bits_15_00; + __le16 reserved; +} __packed; + +/* 4.39 SetHtProtection - WO */ +#define WSM_DUAL_CTS_PROT_ENB (1 << 0) +#define WSM_NON_GREENFIELD_STA_PRESENT (1 << 1) +#define WSM_HT_PROT_MODE__NO_PROT (0 << 2) +#define WSM_HT_PROT_MODE__NON_MEMBER (1 << 2) +#define WSM_HT_PROT_MODE__20_MHZ (2 << 2) +#define WSM_HT_PROT_MODE__NON_HT_MIXED (3 << 2) +#define WSM_LSIG_TXOP_PROT_FULL (1 << 4) +#define WSM_LARGE_L_LENGTH_PROT (1 << 5) + +struct wsm_ht_protection { + __le32 flags; +} __packed; + +/* 4.40 GPIO Command - R/W */ +#define WSM_GPIO_COMMAND_SETUP 0 +#define WSM_GPIO_COMMAND_READ 1 +#define WSM_GPIO_COMMAND_WRITE 2 +#define WSM_GPIO_COMMAND_RESET 3 +#define WSM_GPIO_ALL_PINS 0xFF + +struct wsm_gpio_command { + u8 command; + u8 pin; + __le16 config; +} __packed; + +/* 4.41 TSFCounter - RO */ +struct wsm_tsf_counter { + __le64 tsf_counter; +} __packed; + +/* 4.43 Keep alive period */ +struct wsm_keep_alive_period { + __le16 period; + u8 reserved[2]; +} __packed; + +static inline int wsm_keep_alive_period(struct cw1200_common *priv, 
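/*
 * [Editorial example - not part of this patch.] The UDP port filter MIB is
 * a header followed immediately by a variable number of entries, so a
 * caller builds both in one buffer. The action/type encodings below are
 * assumptions standing in for the WSM_FILTER_* constants.
 */
struct example_udp_filter_buf {
	struct wsm_udp_port_filter_hdr hdr;
	struct wsm_udp_port_filter entry[1];
} __packed;

static int example_drop_udp_port(struct cw1200_common *priv, u16 port)
{
	struct example_udp_filter_buf buf = {
		.hdr.num = 1,
		.entry[0] = {
			.action = 1,	/* assumed: filter out */
			.type = 0,	/* assumed: destination port */
			.port = __cpu_to_le16(port),
		},
	};

	return wsm_set_udp_port_filter(priv, &buf.hdr);
}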
+ int period) +{ + struct wsm_keep_alive_period arg = { + .period = __cpu_to_le16(period), + }; + return wsm_write_mib(priv, WSM_MIB_ID_KEEP_ALIVE_PERIOD, + &arg, sizeof(arg)); +}; + +/* BSSID filtering */ +struct wsm_set_bssid_filtering { + u8 filter; + u8 reserved[3]; +} __packed; + +static inline int wsm_set_bssid_filtering(struct cw1200_common *priv, + bool enabled) +{ + struct wsm_set_bssid_filtering arg = { + .filter = !enabled, + }; + return wsm_write_mib(priv, WSM_MIB_ID_DISABLE_BSSID_FILTER, + &arg, sizeof(arg)); +} + +/* Multicast filtering - 4.5 */ +struct wsm_mib_multicast_filter { + __le32 enable; + __le32 num_addrs; + u8 macaddrs[WSM_MAX_GRP_ADDRTABLE_ENTRIES][ETH_ALEN]; +} __packed; + +static inline int wsm_set_multicast_filter(struct cw1200_common *priv, + struct wsm_mib_multicast_filter *fp) +{ + return wsm_write_mib(priv, WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE, + fp, sizeof(*fp)); +} + +/* ARP IPv4 filtering - 4.10 */ +struct wsm_mib_arp_ipv4_filter { + __le32 enable; + __be32 ipv4addrs[WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES]; +} __packed; + +static inline int wsm_set_arp_ipv4_filter(struct cw1200_common *priv, + struct wsm_mib_arp_ipv4_filter *fp) +{ + return wsm_write_mib(priv, WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE, + fp, sizeof(*fp)); +} + +/* P2P Power Save Mode Info - 4.31 */ +struct wsm_p2p_ps_modeinfo { + u8 opp_ps_ct_window; + u8 count; + u8 reserved; + u8 dtim_count; + __le32 duration; + __le32 interval; + __le32 start_time; +} __packed; + +static inline int wsm_set_p2p_ps_modeinfo(struct cw1200_common *priv, + struct wsm_p2p_ps_modeinfo *mi) +{ + return wsm_write_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO, + mi, sizeof(*mi)); +} + +static inline int wsm_get_p2p_ps_modeinfo(struct cw1200_common *priv, + struct wsm_p2p_ps_modeinfo *mi) +{ + return wsm_read_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO, + mi, sizeof(*mi)); +} + +/* UseMultiTxConfMessage */ + +static inline int wsm_use_multi_tx_conf(struct cw1200_common *priv, + bool enabled) +{ + __le32 arg = enabled ? 
__cpu_to_le32(1) : 0; + + return wsm_write_mib(priv, WSM_MIB_USE_MULTI_TX_CONF, + &arg, sizeof(arg)); +} + + +/* 4.26 SetUpasdInformation */ +struct wsm_uapsd_info { + __le16 uapsd_flags; + __le16 min_auto_trigger_interval; + __le16 max_auto_trigger_interval; + __le16 auto_trigger_step; +}; + +static inline int wsm_set_uapsd_info(struct cw1200_common *priv, + struct wsm_uapsd_info *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_SET_UAPSD_INFORMATION, + arg, sizeof(*arg)); +} + +/* 4.22 OverrideInternalTxRate */ +struct wsm_override_internal_txrate { + u8 internalTxRate; + u8 nonErpInternalTxRate; + u8 reserved[2]; +} __packed; + +static inline int wsm_set_override_internal_txrate(struct cw1200_common *priv, + struct wsm_override_internal_txrate *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE, + arg, sizeof(*arg)); +} + +/* ******************************************************************** */ +/* WSM TX port control */ + +void wsm_lock_tx(struct cw1200_common *priv); +void wsm_lock_tx_async(struct cw1200_common *priv); +bool wsm_flush_tx(struct cw1200_common *priv); +void wsm_unlock_tx(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* WSM / BH API */ + +int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len); +int wsm_handle_rx(struct cw1200_common *priv, u16 id, struct wsm_hdr *wsm, + struct sk_buff **skb_p); + +/* ******************************************************************** */ +/* wsm_buf API */ + +struct wsm_buf { + u8 *begin; + u8 *data; + u8 *end; +}; + +void wsm_buf_init(struct wsm_buf *buf); +void wsm_buf_deinit(struct wsm_buf *buf); + +/* ******************************************************************** */ +/* wsm_cmd API */ + +struct wsm_cmd { + spinlock_t lock; /* Protect structure from multiple access */ + int done; + u8 *ptr; + size_t len; + void *arg; + int ret; + u16 cmd; +}; + +/* ******************************************************************** */ +/* WSM TX buffer access */ + +int wsm_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst); +void wsm_txed(struct cw1200_common *priv, u8 *data); + +/* ******************************************************************** */ +/* Queue mapping: WSM <---> linux */ +/* Linux: VO VI BE BK */ +/* WSM: BE BK VI VO */ + +static inline u8 wsm_queue_id_to_linux(u8 queue_id) +{ + static const u8 queue_mapping[] = { + 2, 3, 1, 0 + }; + return queue_mapping[queue_id]; +} + +static inline u8 wsm_queue_id_to_wsm(u8 queue_id) +{ + static const u8 queue_mapping[] = { + 3, 2, 0, 1 + }; + return queue_mapping[queue_id]; +} + +#endif /* CW1200_HWIO_H_INCLUDED */ diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 15920aaa5dd6..f8ab193009cd 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -6242,8 +6242,6 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, if ((val & 0x0000ff00) != 0) pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff); - pci_set_power_state(pci_dev, PCI_D0); - if (!ipw2100_hw_is_adapter_in_system(dev)) { printk(KERN_WARNING DRV_NAME "Device not found via register read.\n"); diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c index b37a582ccbe7..866ce6c68405 100644 --- a/drivers/net/wireless/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/iwlegacy/3945-mac.c @@ -3727,7 +3727,8 @@ il3945_pci_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent)
	 * 5. Setup HW Constants
	 * ********************/
	/* Device-specific setup */
-	if (il3945_hw_set_hw_params(il)) {
+	err = il3945_hw_set_hw_params(il);
+	if (err) {
		IL_ERR("failed to set hw settings\n");
		goto out_eeprom_free;
	}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index dc1e6da9976a..c092033945cc 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -331,6 +331,19 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
 		return;
 	}
+	/*
+	 * The firmware will not transmit a frame on a passive channel if it
+	 * has not yet received a valid frame on that channel. When this
+	 * error happens we have to wait until the firmware unblocks itself,
+	 * i.e. until we receive a beacon or another frame. We unblock the
+	 * queues in il3945_pass_packet_to_mac80211 or in
+	 * il_mac_bss_info_changed.
+	 */
+	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
+	    il->iw_mode == NL80211_IFTYPE_STATION) {
+		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+		D_INFO("Stopped queues - RX waiting on passive channel\n");
+	}
+
 	txq->time_stamp = jiffies;
 	info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
 	ieee80211_tx_info_clear_status(info);
@@ -488,6 +501,11 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
 		return;
 	}
+	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
+		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+		D_INFO("Woke queues - frame received on passive channel\n");
+	}
+
 	skb = dev_alloc_skb(128);
 	if (!skb) {
 		IL_ERR("dev_alloc_skb failed\n");
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 9a95045c97b6..d287fd2dce42 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -588,6 +588,11 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
 		return;
 	}
+	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
+		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+		D_INFO("Woke queues - frame received on passive channel\n");
+	}
+
 	/* In case of HW accelerated crypto and bad decryption, drop */
 	if (!il->cfg->mod_params->sw_crypto &&
 	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
@@ -2806,6 +2811,19 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
 		return;
 	}
+	/*
+	 * The firmware will not transmit a frame on a passive channel if it
+	 * has not yet received a valid frame on that channel. When this
+	 * error happens we have to wait until the firmware unblocks itself,
+	 * i.e. until we receive a beacon or another frame. We unblock the
+	 * queues in il4965_pass_packet_to_mac80211 or in
+	 * il_mac_bss_info_changed.
+	 */
+	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
+	    il->iw_mode == NL80211_IFTYPE_STATION) {
+		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+		D_INFO("Stopped queues - RX waiting on passive channel\n");
+	}
+
 	spin_lock_irqsave(&il->sta_lock, flags);
 	if (txq->sched_retry) {
 		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
@@ -5741,7 +5759,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
 	hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
 		    IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
-		    IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+		    IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
+		    IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
 	if (il->cfg->sku & IL_SKU_N)
 		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 3b6c99400892..048421511988 100644
--- a/drivers/net/wireless/iwlegacy/commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -1348,14 +1348,6 @@ struct il_rx_mpdu_res_start {
 #define TX_CMD_SEC_KEY128	0x08
 /*
- * security overhead sizes
- */
-#define WEP_IV_LEN 4
-#define WEP_ICV_LEN 4
-#define CCMP_MIC_LEN 8
-#define TKIP_ICV_LEN 4
-
-/*
  * C_TX = 0x1c (command)
  */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index e9a3cbc409ae..3195aad440dd 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -5307,6 +5307,17 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		D_MAC80211("BSSID %pM\n", bss_conf->bssid);
 		/*
+		 * On a passive channel we wait with blocked queues to see if
+		 * there is traffic on that channel. If no frame is received
+		 * (which is very unlikely, since the scan detected an AP on
+		 * that channel, but theoretically possible), the mac80211
+		 * association procedure will time out and mac80211 will call
+		 * us with a NULL bssid. We have to unblock the queues in
+		 * that case.
+		 */
+		if (is_zero_ether_addr(bss_conf->bssid))
+			il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+
+		/*
 		 * If there is currently a HW scan going on in the background,
 		 * then we need to cancel it, otherwise sometimes we are not
 		 * able to authenticate (FIXME: why ?)
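/*
 * [Editorial note - not part of this patch.] The common.h hunk below
 * factors the per-AC stop refcount out of il_{wake,stop}_queue() so that a
 * global reason bit (here: waiting for RX on a passive channel) can hold
 * the same counters. The resulting usage pattern, using only symbols added
 * by this patch:
 */
static void example_passive_channel_flow(struct il_priv *il)
{
	/* TX failed with TX_STATUS_FAIL_PASSIVE_NO_RX: gate all ACs. */
	il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);

	/* ...later, a frame was received on that channel... */
	il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
}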
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h index 4caaf52986a4..83f8ed8a5528 100644 --- a/drivers/net/wireless/iwlegacy/common.h +++ b/drivers/net/wireless/iwlegacy/common.h @@ -1299,6 +1299,8 @@ struct il_priv { /* queue refcounts */ #define IL_MAX_HW_QUEUES 32 unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)]; +#define IL_STOP_REASON_PASSIVE 0 + unsigned long stop_reason; /* for each AC */ atomic_t queue_stop_count[4]; @@ -2257,6 +2259,19 @@ il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq) } static inline void +_il_wake_queue(struct il_priv *il, u8 ac) +{ + if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0) + ieee80211_wake_queue(il->hw, ac); +} + +static inline void +_il_stop_queue(struct il_priv *il, u8 ac) +{ + if (atomic_inc_return(&il->queue_stop_count[ac]) > 0) + ieee80211_stop_queue(il->hw, ac); +} +static inline void il_wake_queue(struct il_priv *il, struct il_tx_queue *txq) { u8 queue = txq->swq_id; @@ -2264,8 +2279,7 @@ il_wake_queue(struct il_priv *il, struct il_tx_queue *txq) u8 hwq = (queue >> 2) & 0x1f; if (test_and_clear_bit(hwq, il->queue_stopped)) - if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0) - ieee80211_wake_queue(il->hw, ac); + _il_wake_queue(il, ac); } static inline void @@ -2276,8 +2290,27 @@ il_stop_queue(struct il_priv *il, struct il_tx_queue *txq) u8 hwq = (queue >> 2) & 0x1f; if (!test_and_set_bit(hwq, il->queue_stopped)) - if (atomic_inc_return(&il->queue_stop_count[ac]) > 0) - ieee80211_stop_queue(il->hw, ac); + _il_stop_queue(il, ac); +} + +static inline void +il_wake_queues_by_reason(struct il_priv *il, int reason) +{ + u8 ac; + + if (test_and_clear_bit(reason, &il->stop_reason)) + for (ac = 0; ac < 4; ac++) + _il_wake_queue(il, ac); +} + +static inline void +il_stop_queues_by_reason(struct il_priv *il, int reason) +{ + u8 ac; + + if (!test_and_set_bit(reason, &il->stop_reason)) + for (ac = 0; ac < 4; ac++) + _il_stop_queue(il, ac); } #ifdef ieee80211_stop_queue diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index 3b5613ea458b..f55a758b87f6 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -7,14 +7,16 @@ iwlwifi-objs += iwl-notif-wait.o iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o -iwlwifi-objs += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwl-7000.o +iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o +iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o + +iwlwifi-objs += $(iwlwifi-m) iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o ccflags-y += -D__CHECK_ENDIAN__ -I$(src) - obj-$(CONFIG_IWLDVM) += dvm/ obj-$(CONFIG_IWLMVM) += mvm/ diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h index 48545ab00311..de2c9514bef6 100644 --- a/drivers/net/wireless/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/iwlwifi/dvm/agn.h @@ -76,13 +76,16 @@ #define IWL_INVALID_STATION 255 /* device operations */ -extern struct iwl_lib_ops iwl1000_lib; -extern struct iwl_lib_ops iwl2000_lib; -extern struct iwl_lib_ops iwl2030_lib; -extern struct iwl_lib_ops iwl5000_lib; -extern struct iwl_lib_ops iwl5150_lib; -extern struct iwl_lib_ops iwl6000_lib; -extern struct iwl_lib_ops iwl6030_lib; +extern const struct iwl_dvm_cfg iwl_dvm_1000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_2000_cfg; 
+extern const struct iwl_dvm_cfg iwl_dvm_105_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_2030_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_5000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_5150_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6005_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6050_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg; #define TIME_UNIT 1024 @@ -291,8 +294,8 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena); static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) { - return priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist; + return priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist; } #ifdef CONFIG_IWLWIFI_DEBUG diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c index d6c4cf2ad7c5..1b0f0d502568 100644 --- a/drivers/net/wireless/iwlwifi/dvm/calib.c +++ b/drivers/net/wireless/iwlwifi/dvm/calib.c @@ -521,7 +521,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv) iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); - if (priv->cfg->base_params->hd_v2) { + if (priv->lib->hd_v2) { cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = HD_INA_NON_SQUARE_DET_OFDM_DATA_V2; cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = @@ -895,7 +895,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv, continue; } - delta_g = (priv->cfg->base_params->chain_noise_scale * + delta_g = (priv->lib->chain_noise_scale * ((s32)average_noise[default_chain] - (s32)average_noise[i])) / 1500; @@ -1051,8 +1051,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv) return; /* Analyze signal for disconnected antenna */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { /* Disable disconnected antenna algorithm for advanced bt coex, assuming valid antennas are connected */ data->active_chains = priv->nvm_data->valid_rx_ant; diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h index 95ca026ecc9d..ebdac909f0cd 100644 --- a/drivers/net/wireless/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/iwlwifi/dvm/commands.h @@ -838,10 +838,6 @@ struct iwl_qosparam_cmd { #define STA_MODIFY_DELBA_TID_MSK 0x10 #define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 -/* Receiver address (actually, Rx station's index into station table), - * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ -#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) - /* agn */ struct iwl_keyinfo { __le16 key_flags; @@ -1225,14 +1221,6 @@ struct iwl_rx_mpdu_res_start { #define TX_CMD_SEC_KEY128 0x08 /* - * security overhead sizes - */ -#define WEP_IV_LEN 4 -#define WEP_ICV_LEN 4 -#define CCMP_MIC_LEN 8 -#define TKIP_ICV_LEN 4 - -/* * REPLY_TX = 0x1c (command) */ diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index 71ea77576d22..5cd87f949266 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h @@ -568,16 +568,61 @@ struct iwl_hw_params { const struct iwl_sensitivity_ranges *sens; }; -struct iwl_lib_ops { - /* set hw dependent parameters */ +/** + * struct iwl_dvm_bt_params - DVM specific BT (coex) parameters + * @advanced_bt_coexist: support advanced bt coexist + * @bt_init_traffic_load: specify initial bt traffic load + * @bt_prio_boost: default bt 
priority boost value
+ * @agg_time_limit: maximum number of uSec in aggregation
+ * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
+ */
+struct iwl_dvm_bt_params {
+	bool advanced_bt_coexist;
+	u8 bt_init_traffic_load;
+	u32 bt_prio_boost;
+	u16 agg_time_limit;
+	bool bt_sco_disable;
+	bool bt_session_2;
+};
+
+/**
+ * struct iwl_dvm_cfg - DVM firmware specific device configuration
+ * @set_hw_params: set hardware parameters
+ * @set_channel_switch: send channel switch command
+ * @nic_config: apply device specific configuration
+ * @temperature: read temperature
+ * @adv_thermal_throttle: support advanced thermal throttling
+ * @support_ct_kill_exit: support ct kill exit condition
+ * @plcp_delta_threshold: plcp error rate threshold used to trigger
+ *	radio tuning when the received plcp error rate is too high
+ * @chain_noise_scale: default chain noise scale used for gain computation
+ * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @no_idle_support: do not support idle mode
+ * @bt_params: pointer to BT parameters
+ * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ *	don't send it to those
+ * @temp_offset_v2: support v2 of temperature offset calibration
+ * @adv_pm: advanced power management
+ */
+struct iwl_dvm_cfg {
 	void (*set_hw_params)(struct iwl_priv *priv);
 	int (*set_channel_switch)(struct iwl_priv *priv,
 				  struct ieee80211_channel_switch *ch_switch);
-	/* device specific configuration */
 	void (*nic_config)(struct iwl_priv *priv);
-
-	/* temperature */
 	void (*temperature)(struct iwl_priv *priv);
+
+	const struct iwl_dvm_bt_params *bt_params;
+	s32 chain_noise_scale;
+	u8 plcp_delta_threshold;
+	bool adv_thermal_throttle;
+	bool support_ct_kill_exit;
+	bool hd_v2;
+	bool no_idle_support;
+	bool need_temp_offset_calib;
+	bool no_xtal_calib;
+	bool temp_offset_v2;
+	bool adv_pm;
 };
 struct iwl_wipan_noa_data {
@@ -610,7 +655,7 @@ struct iwl_priv {
 	struct device *dev;		/* for debug prints only */
 	const struct iwl_cfg *cfg;
 	const struct iwl_fw *fw;
-	const struct iwl_lib_ops *lib;
+	const struct iwl_dvm_cfg *lib;
 	unsigned long status;
 	spinlock_t sta_lock;
@@ -870,6 +915,9 @@ struct iwl_priv {
 	__le64 replay_ctr;
 	__le16 last_seq_ctl;
 	bool have_rekey_data;
+#ifdef CONFIG_PM_SLEEP
+	struct wiphy_wowlan_support wowlan_support;
+#endif
 	/* device_pointers: pointers to ucode event tables */
 	struct {
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index c48907c8ab43..352c6cb7b4f1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -174,10 +174,13 @@ static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
 	priv->hw_params.sens = &iwl1000_sensitivity;
 }
-struct iwl_lib_ops iwl1000_lib = {
+const struct iwl_dvm_cfg iwl_dvm_1000_cfg = {
 	.set_hw_params = iwl1000_hw_set_hw_params,
 	.nic_config = iwl1000_nic_config,
 	.temperature = iwlagn_temperature,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
 };
@@ -232,16 +235,56 @@ static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
 	priv->hw_params.sens = &iwl2000_sensitivity;
 }
-struct iwl_lib_ops iwl2000_lib = {
+const struct iwl_dvm_cfg iwl_dvm_2000_cfg = {
 	.set_hw_params = iwl2000_hw_set_hw_params,
 	.nic_config = iwl2000_nic_config,
 	.temperature = iwlagn_temperature,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit =
true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .hd_v2 = true, + .need_temp_offset_calib = true, + .temp_offset_v2 = true, }; -struct iwl_lib_ops iwl2030_lib = { +const struct iwl_dvm_cfg iwl_dvm_105_cfg = { .set_hw_params = iwl2000_hw_set_hw_params, .nic_config = iwl2000_nic_config, .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .hd_v2 = true, + .need_temp_offset_calib = true, + .temp_offset_v2 = true, + .adv_pm = true, +}; + +static const struct iwl_dvm_bt_params iwl2030_bt_params = { + /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ + .advanced_bt_coexist = true, + .agg_time_limit = BT_AGG_THRESHOLD_DEF, + .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, + .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32, + .bt_sco_disable = true, + .bt_session_2 = true, +}; + +const struct iwl_dvm_cfg iwl_dvm_2030_cfg = { + .set_hw_params = iwl2000_hw_set_hw_params, + .nic_config = iwl2000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .hd_v2 = true, + .bt_params = &iwl2030_bt_params, + .need_temp_offset_calib = true, + .temp_offset_v2 = true, + .adv_pm = true, }; /* @@ -420,16 +463,23 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, return iwl_dvm_send_cmd(priv, &hcmd); } -struct iwl_lib_ops iwl5000_lib = { +const struct iwl_dvm_cfg iwl_dvm_5000_cfg = { .set_hw_params = iwl5000_hw_set_hw_params, .set_channel_switch = iwl5000_hw_channel_switch, .temperature = iwlagn_temperature, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .no_idle_support = true, }; -struct iwl_lib_ops iwl5150_lib = { +const struct iwl_dvm_cfg iwl_dvm_5150_cfg = { .set_hw_params = iwl5150_hw_set_hw_params, .set_channel_switch = iwl5000_hw_channel_switch, .temperature = iwl5150_temperature, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .no_idle_support = true, + .no_xtal_calib = true, }; @@ -584,16 +634,59 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, return err; } -struct iwl_lib_ops iwl6000_lib = { +const struct iwl_dvm_cfg iwl_dvm_6000_cfg = { .set_hw_params = iwl6000_hw_set_hw_params, .set_channel_switch = iwl6000_hw_channel_switch, .nic_config = iwl6000_nic_config, .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +const struct iwl_dvm_cfg iwl_dvm_6005_cfg = { + .set_hw_params = iwl6000_hw_set_hw_params, + .set_channel_switch = iwl6000_hw_channel_switch, + .nic_config = iwl6000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .need_temp_offset_calib = true, +}; + +const struct iwl_dvm_cfg iwl_dvm_6050_cfg = { + .set_hw_params = iwl6000_hw_set_hw_params, + .set_channel_switch = iwl6000_hw_channel_switch, + .nic_config = iwl6000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1500, +}; + +static const struct 
iwl_dvm_bt_params iwl6000_bt_params = { + /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ + .advanced_bt_coexist = true, + .agg_time_limit = BT_AGG_THRESHOLD_DEF, + .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, + .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, + .bt_sco_disable = true, }; -struct iwl_lib_ops iwl6030_lib = { +const struct iwl_dvm_cfg iwl_dvm_6030_cfg = { .set_hw_params = iwl6000_hw_set_hw_params, .set_channel_switch = iwl6000_hw_channel_switch, .nic_config = iwl6000_nic_config, .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .bt_params = &iwl6000_bt_params, + .need_temp_offset_calib = true, + .adv_pm = true, }; diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 54f553380aa8..9879550a0fea 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -254,23 +254,23 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != sizeof(basic.bt3_lookup_table)); - if (priv->cfg->bt_params) { + if (priv->lib->bt_params) { /* * newer generation of devices (2000 series and newer) * use the version 2 of the bt command * we need to make sure sending the host command * with correct data structure to avoid uCode assert */ - if (priv->cfg->bt_params->bt_session_2) { + if (priv->lib->bt_params->bt_session_2) { bt_cmd_v2.prio_boost = cpu_to_le32( - priv->cfg->bt_params->bt_prio_boost); + priv->lib->bt_params->bt_prio_boost); bt_cmd_v2.tx_prio_boost = 0; bt_cmd_v2.rx_prio_boost = 0; } else { /* older version only has 8 bits */ - WARN_ON(priv->cfg->bt_params->bt_prio_boost & ~0xFF); + WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF); bt_cmd_v1.prio_boost = - priv->cfg->bt_params->bt_prio_boost; + priv->lib->bt_params->bt_prio_boost; bt_cmd_v1.tx_prio_boost = 0; bt_cmd_v1.rx_prio_boost = 0; } @@ -330,7 +330,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) priv->bt_full_concurrent ? 
"full concurrency" : "3-wire"); - if (priv->cfg->bt_params->bt_session_2) { + if (priv->lib->bt_params->bt_session_2) { memcpy(&bt_cmd_v2.basic, &basic, sizeof(basic)); ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, @@ -758,8 +758,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv) */ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) { - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && (priv->bt_full_concurrent || priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { /* @@ -830,8 +830,8 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) else active_chains = priv->nvm_data->valid_rx_ant; - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && (priv->bt_full_concurrent || priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { /* diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index cab23af0be9e..eef64bb854f7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -208,20 +208,21 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, priv->trans->ops->d3_suspend && priv->trans->ops->d3_resume && device_can_wakeup(priv->trans->dev)) { - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE; + priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; if (!iwlwifi_mod_params.sw_crypto) - hw->wiphy->wowlan.flags |= + priv->wowlan_support.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_GTK_REKEY_FAILURE; - hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; - hw->wiphy->wowlan.pattern_min_len = + priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; + priv->wowlan_support.pattern_min_len = IWLAGN_WOWLAN_MIN_PATTERN_LEN; - hw->wiphy->wowlan.pattern_max_len = + priv->wowlan_support.pattern_max_len = IWLAGN_WOWLAN_MAX_PATTERN_LEN; + hw->wiphy->wowlan = &priv->wowlan_support; } #endif @@ -426,7 +427,11 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw, if (ret) goto error; - iwl_trans_d3_suspend(priv->trans); + /* let the ucode operate on its own */ + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + + iwl_trans_d3_suspend(priv->trans, false); goto out; @@ -500,7 +505,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) /* we'll clear ctx->vif during iwlagn_prepare_restart() */ vif = ctx->vif; - ret = iwl_trans_d3_resume(priv->trans, &d3_status); + ret = iwl_trans_d3_resume(priv->trans, &d3_status, false); if (ret) goto out_unlock; @@ -509,6 +514,10 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) goto out_unlock; } + /* uCode is no longer operating by itself */ + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + base = priv->device_pointers.error_event_table; if (!iwlagn_hw_valid_rtc_data_addr(base)) { IWL_WARN(priv, "Invalid error table during resume!\n"); @@ -1276,8 +1285,8 @@ static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (priv->lib->bt_params && + 
priv->lib->bt_params->advanced_bt_coexist) { if (rssi_event == RSSI_EVENT_LOW) priv->bt_enable_pspoll = true; else if (rssi_event == RSSI_EVENT_HIGH) @@ -1387,7 +1396,7 @@ static int iwl_setup_interface(struct iwl_priv *priv, return err; } - if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist && + if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist && vif->type == NL80211_IFTYPE_ADHOC) { /* * pretend to have high BT traffic as long as we diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index 74d7572e7091..7aa9c8dc33ef 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c @@ -615,7 +615,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv) priv->thermal_throttle.ct_kill_toggle = false; - if (priv->cfg->base_params->support_ct_kill_exit) { + if (priv->lib->support_ct_kill_exit) { adv_cmd.critical_temperature_enter = cpu_to_le32(priv->hw_params.ct_kill_threshold); adv_cmd.critical_temperature_exit = @@ -732,10 +732,10 @@ int iwl_alive_start(struct iwl_priv *priv) } /* download priority table before any calibration request */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { /* Configure Bluetooth device coexistence support */ - if (priv->cfg->bt_params->bt_sco_disable) + if (priv->lib->bt_params->bt_sco_disable) priv->bt_enable_pspoll = false; else priv->bt_enable_pspoll = true; @@ -873,9 +873,9 @@ void iwl_down(struct iwl_priv *priv) priv->bt_status = 0; priv->cur_rssi_ctx = NULL; priv->bt_is_sco = 0; - if (priv->cfg->bt_params) + if (priv->lib->bt_params) priv->bt_traffic_load = - priv->cfg->bt_params->bt_init_traffic_load; + priv->lib->bt_params->bt_init_traffic_load; else priv->bt_traffic_load = 0; priv->bt_full_concurrent = false; @@ -1058,7 +1058,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) iwl_setup_scan_deferred_work(priv); - if (priv->cfg->bt_params) + if (priv->lib->bt_params) iwlagn_bt_setup_deferred_work(priv); init_timer(&priv->statistics_periodic); @@ -1072,7 +1072,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) void iwl_cancel_deferred_work(struct iwl_priv *priv) { - if (priv->cfg->bt_params) + if (priv->lib->bt_params) iwlagn_bt_cancel_deferred_work(priv); cancel_work_sync(&priv->run_time_calib_work); @@ -1098,8 +1098,7 @@ static int iwl_init_drv(struct iwl_priv *priv) priv->band = IEEE80211_BAND_2GHZ; - priv->plcp_delta_threshold = - priv->cfg->base_params->plcp_delta_threshold; + priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold; priv->iw_mode = NL80211_IFTYPE_STATION; priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; @@ -1116,8 +1115,8 @@ static int iwl_init_drv(struct iwl_priv *priv) iwl_init_scan_params(priv); /* init bt coex */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; @@ -1264,31 +1263,37 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, switch (priv->cfg->device_family) { case IWL_DEVICE_FAMILY_1000: case IWL_DEVICE_FAMILY_100: - priv->lib = &iwl1000_lib; + priv->lib = &iwl_dvm_1000_cfg; break; case IWL_DEVICE_FAMILY_2000: + priv->lib = &iwl_dvm_2000_cfg; + break; case IWL_DEVICE_FAMILY_105: - 
priv->lib = &iwl2000_lib; + priv->lib = &iwl_dvm_105_cfg; break; case IWL_DEVICE_FAMILY_2030: case IWL_DEVICE_FAMILY_135: - priv->lib = &iwl2030_lib; + priv->lib = &iwl_dvm_2030_cfg; break; case IWL_DEVICE_FAMILY_5000: - priv->lib = &iwl5000_lib; + priv->lib = &iwl_dvm_5000_cfg; break; case IWL_DEVICE_FAMILY_5150: - priv->lib = &iwl5150_lib; + priv->lib = &iwl_dvm_5150_cfg; break; case IWL_DEVICE_FAMILY_6000: - case IWL_DEVICE_FAMILY_6005: case IWL_DEVICE_FAMILY_6000i: + priv->lib = &iwl_dvm_6000_cfg; + break; + case IWL_DEVICE_FAMILY_6005: + priv->lib = &iwl_dvm_6005_cfg; + break; case IWL_DEVICE_FAMILY_6050: case IWL_DEVICE_FAMILY_6150: - priv->lib = &iwl6000_lib; + priv->lib = &iwl_dvm_6050_cfg; break; case IWL_DEVICE_FAMILY_6030: - priv->lib = &iwl6030_lib; + priv->lib = &iwl_dvm_6030_cfg; break; default: break; @@ -1854,14 +1859,9 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, return pos; } -#ifdef CONFIG_IWLWIFI_DEBUG if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log) size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; -#else - size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) - ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; -#endif IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", size); @@ -1905,10 +1905,8 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) unsigned int reload_msec; unsigned long reload_jiffies; -#ifdef CONFIG_IWLWIFI_DEBUG if (iwl_have_debug_level(IWL_DL_FW_ERRORS)) iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); -#endif /* uCode is no longer loaded. */ priv->ucode_loaded = false; diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c index bd69018d07a9..77cb59712235 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/iwlwifi/dvm/power.c @@ -163,7 +163,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, u8 skip; u32 slp_itrvl; - if (priv->cfg->adv_pm) { + if (priv->lib->adv_pm) { table = apm_range_2; if (period <= IWL_DTIM_RANGE_1_MAX) table = apm_range_1; @@ -217,7 +217,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; if (iwl_advanced_bt_coexist(priv)) { - if (!priv->cfg->bt_params->bt_sco_disable) + if (!priv->lib->bt_params->bt_sco_disable) cmd->flags |= IWL_POWER_BT_SCO_ENA; else cmd->flags &= ~IWL_POWER_BT_SCO_ENA; @@ -293,7 +293,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, if (priv->wowlan) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); - else if (!priv->cfg->base_params->no_idle_support && + else if (!priv->lib->no_idle_support && priv->hw->conf.flags & IEEE80211_CONF_IDLE) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); else if (iwl_tt_is_low_power_state(priv)) { diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index 10fbb176cc8e..8fe76dc4f373 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -1088,7 +1088,7 @@ done: (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) rs_program_fix_rate(priv, lq_sta); #endif - if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) + if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist) rs_bt_update_lq(priv, ctx, lq_sta); } @@ -3064,11 +3064,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, * overwrite if needed, pass aggregation time limit * to uCode in uSec */ - if (priv && priv->cfg->bt_params && - priv->cfg->bt_params->agg_time_limit 
&& + if (priv && priv->lib->bt_params && + priv->lib->bt_params->agg_time_limit && priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(priv->cfg->bt_params->agg_time_limit); + cpu_to_le16(priv->lib->bt_params->agg_time_limit); } static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c index a4eed2055fdb..2f3fd160ab44 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/iwlwifi/dvm/rx.c @@ -1102,7 +1102,7 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv) iwl_notification_wait_init(&priv->notif_wait); /* Set up BT Rx handlers */ - if (priv->cfg->bt_params) + if (priv->lib->bt_params) iwlagn_bt_rx_handler_setup(priv); } diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c index d69b55866714..8c686a5b90ac 100644 --- a/drivers/net/wireless/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/iwlwifi/dvm/scan.c @@ -801,8 +801,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) * Internal scans are passive, so we can indiscriminately set * the BT ignore flag on 2.4 GHz since it applies to TX only. */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; break; case IEEE80211_BAND_5GHZ: @@ -844,8 +844,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) band = priv->scan_band; if (band == IEEE80211_BAND_2GHZ && - priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { /* transmit 2.4 GHz probes only on first antenna */ scan_tx_antennas = first_antenna(scan_tx_antennas); } @@ -873,8 +873,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) rx_ant = first_antenna(active_chains); } - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && priv->bt_full_concurrent) { /* operated as 1x1 in full concurrency mode */ rx_ant = first_antenna(rx_ant); diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c index 03f9bc01c0cc..fbeee081ee2f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tt.c +++ b/drivers/net/wireless/iwlwifi/dvm/tt.c @@ -627,7 +627,7 @@ void iwl_tt_initialize(struct iwl_priv *priv) INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); - if (priv->cfg->base_params->adv_thermal_throttle) { + if (priv->lib->adv_thermal_throttle) { IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); tt->restriction = kcalloc(IWL_TI_STATE_MAX, sizeof(struct iwl_tt_restriction), diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index a900aaf47790..353a053b4eb1 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -83,8 +83,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, else if (ieee80211_is_back_req(fc)) tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; else if (info->band == IEEE80211_BAND_2GHZ && - priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) 
|| ieee80211_is_reassoc_req(fc) || skb->protocol == cpu_to_be16(ETH_P_PAE))) @@ -202,8 +202,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, rate_flags |= RATE_MCS_CCK_MSK; /* Set up antennas */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && priv->bt_full_concurrent) { /* operated as 1x1 in full concurrency mode */ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, @@ -986,8 +986,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, * notification again. */ if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && - priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); } diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 0a1cdc5e856b..86270b69cd02 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -132,8 +132,8 @@ int iwl_init_alive_start(struct iwl_priv *priv) { int ret; - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { /* * Tell uCode we are ready to perform calibration * need to perform this before any calibration @@ -155,8 +155,8 @@ int iwl_init_alive_start(struct iwl_priv *priv) * temperature offset calibration is only needed for runtime ucode, * so prepare the value now. */ - if (priv->cfg->need_temp_offset_calib) { - if (priv->cfg->temp_offset_v2) + if (priv->lib->need_temp_offset_calib) { + if (priv->lib->temp_offset_v2) return iwl_set_temperature_offset_calib_v2(priv); else return iwl_set_temperature_offset_calib(priv); @@ -277,7 +277,7 @@ static int iwl_alive_notify(struct iwl_priv *priv) if (ret) return ret; - if (!priv->cfg->no_xtal_calib) { + if (!priv->lib->no_xtal_calib) { ret = iwl_set_Xtal_calib(priv); if (ret) return ret; diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index c080ae3070b2..0d2afe098afc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -60,9 +60,6 @@ static const struct iwl_base_params iwl1000_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_1000, .shadow_ram_support = false, .led_compensation = 51, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_WATCHDOG_DISABLED, .max_event_log_size = 128, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index a6ddd2f9fba0..c727ec7c90a6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -72,14 +72,9 @@ static const struct iwl_base_params iwl2000_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, .led_compensation = 51, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_DEF_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ - .hd_v2 = true, }; @@ -90,14 +85,9 @@ static const struct iwl_base_params iwl2030_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, .led_compensation = 57, - .adv_thermal_throttle = true, - 
.support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ - .hd_v2 = true, }; static const struct iwl_ht_params iwl2000_ht_params = { @@ -106,16 +96,6 @@ static const struct iwl_ht_params iwl2000_ht_params = { .ht40_bands = BIT(IEEE80211_BAND_2GHZ), }; -static const struct iwl_bt_params iwl2030_bt_params = { - /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ - .advanced_bt_coexist = true, - .agg_time_limit = BT_AGG_THRESHOLD_DEF, - .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, - .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32, - .bt_sco_disable = true, - .bt_session_2 = true, -}; - static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -137,12 +117,10 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .device_family = IWL_DEVICE_FAMILY_2000, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ + .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .need_temp_offset_calib = true, \ - .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl2000_2bgn_cfg = { @@ -168,12 +146,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2030_base_params, \ - .bt_params = &iwl2030_bt_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .need_temp_offset_calib = true, \ - .temp_offset_v2 = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true + .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl2030_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", @@ -193,10 +167,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .need_temp_offset_calib = true, \ - .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true, \ .rx_with_siso_diversity = true const struct iwl_cfg iwl105_bgn_cfg = { @@ -222,12 +193,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2030_base_params, \ - .bt_params = &iwl2030_bt_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .need_temp_offset_calib = true, \ - .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true, \ .rx_with_siso_diversity = true const struct iwl_cfg iwl135_bgn_cfg = { diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 403f3f224bf6..ecc01e1a61a1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -59,11 +59,8 @@ static const struct iwl_base_params iwl5000_base_params = { .num_of_queues = IWLAGN_NUM_QUEUES, .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, .led_compensation = 51, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_WATCHDOG_DISABLED, .max_event_log_size = 512, - .no_idle_support = true, }; static const struct iwl_ht_params iwl5000_ht_params = { @@ -159,7 +156,6 @@ 
const struct iwl_cfg iwl5350_agn_cfg = { .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ - .no_xtal_calib = true, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index b5ab8d1bcac0..30d45e2fc193 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -82,10 +82,6 @@ static const struct iwl_base_params iwl6000_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, .led_compensation = 51, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_DEF_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ @@ -98,10 +94,6 @@ static const struct iwl_base_params iwl6050_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_6x50, .shadow_ram_support = true, .led_compensation = 51, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1500, .wd_timeout = IWL_DEF_WD_TIMEOUT, .max_event_log_size = 1024, .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ @@ -114,10 +106,6 @@ static const struct iwl_base_params iwl6000_g2_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, .led_compensation = 57, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ @@ -129,15 +117,6 @@ static const struct iwl_ht_params iwl6000_ht_params = { .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), }; -static const struct iwl_bt_params iwl6000_bt_params = { - /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ - .advanced_bt_coexist = true, - .agg_time_limit = BT_AGG_THRESHOLD_DEF, - .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, - .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, - .bt_sco_disable = true, -}; - static const struct iwl_eeprom_params iwl6000_eeprom_params = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -163,7 +142,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl6005_2agn_cfg = { @@ -217,11 +195,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ - .bt_params = &iwl6000_bt_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .need_temp_offset_calib = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true \ + .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl6030_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", @@ -256,11 +231,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = { .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ - .bt_params = &iwl6000_bt_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - 
.need_temp_offset_calib = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true + .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl6035_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 50263e87fe15..d4f3b4864ab1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -96,13 +96,9 @@ static const struct iwl_base_params iwl7000_base_params = { .pll_cfg_val = 0, .shadow_ram_support = true, .led_compensation = 57, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, - .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ + .shadow_reg_enable = true, }; static const struct iwl_ht_params iwl7000_ht_params = { @@ -118,14 +114,11 @@ static const struct iwl_ht_params iwl7000_ht_params = { .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl7000_base_params, \ - /* TODO: .bt_params? */ \ - .need_temp_offset_calib = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true \ + .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl7260_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC7260", + .name = "Intel(R) Dual Band Wireless AC 7260", .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, @@ -133,8 +126,44 @@ const struct iwl_cfg iwl7260_2ac_cfg = { .nvm_calib_ver = IWL7260_TX_POWER_VERSION, }; -const struct iwl_cfg iwl3160_ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC3160", +const struct iwl_cfg iwl7260_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 7260", + .fw_name_pre = IWL7260_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7260_NVM_VERSION, + .nvm_calib_ver = IWL7260_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl7260_n_cfg = { + .name = "Intel(R) Wireless N 7260", + .fw_name_pre = IWL7260_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7260_NVM_VERSION, + .nvm_calib_ver = IWL7260_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl3160_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 3160", + .fw_name_pre = IWL3160_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3160_NVM_VERSION, + .nvm_calib_ver = IWL3160_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl3160_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 3160", + .fw_name_pre = IWL3160_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3160_NVM_VERSION, + .nvm_calib_ver = IWL3160_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl3160_n_cfg = { + .name = "Intel(R) Wireless N 3160", .fw_name_pre = IWL3160_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index c38aa8f77554..0189b9050f22 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -136,17 +136,9 @@ enum iwl_led_mode { * @led_compensation: compensate on the led on/off time per HW according * to the deviation to achieve the desired led frequency. 
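(A minimal sketch, not part of the patch: the new 7260/3160 entries above all reuse the IWL_DEVICE_7000 macro, so each declaration expands to designated initializers roughly as below. Only fields visible in this hunk are shown; the variable name is illustrative.)

/* Sketch: expansion of one of the new declarations above. */
const struct iwl_cfg iwl3160_n_cfg_example = {
	.name = "Intel(R) Wireless N 3160",
	.fw_name_pre = IWL3160_FW_PRE,
	/* pulled in by IWL_DEVICE_7000: */
	.max_inst_size = IWL60_RTC_INST_SIZE,
	.max_data_size = IWL60_RTC_DATA_SIZE,
	.base_params = &iwl7000_base_params,
	.led_mode = IWL_LED_RF_STATE,
	/* per-device fields: */
	.ht_params = &iwl7000_ht_params,
	.nvm_ver = IWL3160_NVM_VERSION,
	.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
};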
* The detail algorithm is described in iwl-led.c - * @chain_noise_num_beacons: number of beacons used to compute chain noise - * @adv_thermal_throttle: support advance thermal throttle - * @support_ct_kill_exit: support ct kill exit condition - * @plcp_delta_threshold: plcp error rate threshold used to trigger - * radio tuning when there is a high receiving plcp error rate - * @chain_noise_scale: default chain noise scale used for gain computation * @wd_timeout: TX queues watchdog timeout * @max_event_log_size: size of event log buffer size for ucode event logging * @shadow_reg_enable: HW shadow register support - * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up - * @no_idle_support: do not support idle mode */ struct iwl_base_params { int eeprom_size; @@ -157,31 +149,9 @@ struct iwl_base_params { const u16 max_ll_items; const bool shadow_ram_support; u16 led_compensation; - bool adv_thermal_throttle; - bool support_ct_kill_exit; - u8 plcp_delta_threshold; - s32 chain_noise_scale; unsigned int wd_timeout; u32 max_event_log_size; const bool shadow_reg_enable; - const bool hd_v2; - const bool no_idle_support; -}; - -/* - * @advanced_bt_coexist: support advanced bt coexist - * @bt_init_traffic_load: specify initial bt traffic load - * @bt_prio_boost: default bt priority boost value - * @agg_time_limit: maximum number of uSec in aggregation - * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode - */ -struct iwl_bt_params { - bool advanced_bt_coexist; - u8 bt_init_traffic_load; - u32 bt_prio_boost; - u16 agg_time_limit; - bool bt_sco_disable; - bool bt_session_2; }; /* @@ -231,16 +201,10 @@ struct iwl_eeprom_params { * @nvm_calib_ver: NVM calibration version * @lib: pointer to the lib ops * @base_params: pointer to basic parameters - * @ht_params: point to ht patameters - * @bt_params: pointer to bt parameters - * @need_temp_offset_calib: need to perform temperature offset calibration - * @no_xtal_calib: some devices do not need crystal calibration data, - * don't send it to those + * @ht_params: point to ht parameters * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) - * @adv_pm: advance power management * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device - * @temp_offset_v2: support v2 of temperature offset calibration * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -264,20 +228,16 @@ struct iwl_cfg { const struct iwl_base_params *base_params; /* params likely to change within a device family */ const struct iwl_ht_params *ht_params; - const struct iwl_bt_params *bt_params; const struct iwl_eeprom_params *eeprom_params; - const bool need_temp_offset_calib; /* if used set to true */ - const bool no_xtal_calib; enum iwl_led_mode led_mode; - const bool adv_pm; const bool rx_with_siso_diversity; const bool internal_wimax_coex; - const bool temp_offset_v2; }; /* * This list declares the config structures for all devices. 
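(Sketch of the access-pattern change behind the deletions above: with struct iwl_bt_params removed from iwl_cfg, the dvm hunks earlier in this diff — rs.c, rx.c, scan.c, tx.c, ucode.c — read the parameters through priv->lib instead of priv->cfg. The helper name below is hypothetical; the real hunks open-code the two checks.)

static bool iwl_dvm_has_advanced_bt_coex(struct iwl_priv *priv)
{
	/* bt_params may be NULL on devices without BT coex */
	return priv->lib->bt_params &&
	       priv->lib->bt_params->advanced_bt_coexist;
}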
*/ +#if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_cfg iwl5300_agn_cfg; extern const struct iwl_cfg iwl5100_agn_cfg; extern const struct iwl_cfg iwl5350_agn_cfg; @@ -319,7 +279,14 @@ extern const struct iwl_cfg iwl6035_2agn_cfg; extern const struct iwl_cfg iwl105_bgn_cfg; extern const struct iwl_cfg iwl105_bgn_d_cfg; extern const struct iwl_cfg iwl135_bgn_cfg; +#endif /* CONFIG_IWLDVM */ +#if IS_ENABLED(CONFIG_IWLMVM) extern const struct iwl_cfg iwl7260_2ac_cfg; -extern const struct iwl_cfg iwl3160_ac_cfg; +extern const struct iwl_cfg iwl7260_2n_cfg; +extern const struct iwl_cfg iwl7260_n_cfg; +extern const struct iwl_cfg iwl3160_2ac_cfg; +extern const struct iwl_cfg iwl3160_2n_cfg; +extern const struct iwl_cfg iwl3160_n_cfg; +#endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index 20e845d4da04..a276af476e2d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -472,4 +472,23 @@ #define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) #define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) +/***************************************************************************** + * 7000/3000 series SHR DTS addresses * + *****************************************************************************/ + +/* Diode Results Register Structure: */ +enum dtd_diode_reg { + DTS_DIODE_REG_DIG_VAL = 0x000000FF, /* bits [7:0] */ + DTS_DIODE_REG_VREF_LOW = 0x0000FF00, /* bits [15:8] */ + DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */ + DTS_DIODE_REG_VREF_ID = 0x03000000, /* bits [25:24] */ + DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */ + DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */ +/* Those are the masks INSIDE the flags bit-field: */ + DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0, + DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */ + DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7, + DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */ +}; + #endif /* !__iwl_csr_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 8cf5db7fb5c9..7edb8519c8a4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -34,7 +34,11 @@ static inline bool iwl_have_debug_level(u32 level) { +#ifdef CONFIG_IWLWIFI_DEBUG return iwlwifi_mod_params.debug_level & level; +#else + return false; +#endif } void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 40fed1f511e2..2f690e578b5c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -1236,6 +1236,9 @@ MODULE_PARM_DESC(wd_disable, "Disable stuck queue watchdog timer 0=system default, " "1=disable, 2=enable (default: 0)"); +module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO); +MODULE_PARM_DESC(nvm_file, "NVM file name"); + /* * set bt_coex_active to true, uCode will do kill/defer * every time the priority line is asserted (BT is sending signals on the diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h index 7d1450916308..429337a2b9a1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/iwlwifi/iwl-drv.h @@ -62,8 +62,7 @@ #ifndef __iwl_drv_h__ #define __iwl_drv_h__ - -#include <linux/module.h> +#include <linux/export.h> /* for all modules */ #define DRV_NAME "iwlwifi" diff --git 
a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c index 600c9fdd7f71..4c887f365908 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c @@ -732,17 +732,16 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data, void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band) + enum ieee80211_band band, + u8 tx_chains, u8 rx_chains) { int max_bit_rate = 0; - u8 rx_chains; - u8 tx_chains; - tx_chains = hweight8(data->valid_tx_ant); + tx_chains = hweight8(tx_chains); if (cfg->rx_with_siso_diversity) rx_chains = 1; else - rx_chains = hweight8(data->valid_rx_ant); + rx_chains = hweight8(rx_chains); if (!(data->sku_cap_11n_enable) || !cfg->ht_params) { ht_info->ht_supported = false; @@ -806,7 +805,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, IEEE80211_BAND_2GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, + data->valid_tx_ant, data->valid_rx_ant); sband = &data->bands[IEEE80211_BAND_5GHZ]; sband->band = IEEE80211_BAND_5GHZ; @@ -814,7 +814,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, IEEE80211_BAND_5GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, + data->valid_tx_ant, data->valid_rx_ant); if (n_channels != n_used) IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n", diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h index 37f115390b19..d73304a23ec2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h @@ -133,6 +133,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data, void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band); + enum ieee80211_band band, + u8 tx_chains, u8 rx_chains); #endif /* __iwl_eeprom_parse_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index c4c446d41eb0..f844d5c748c0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -106,11 +106,14 @@ enum iwl_ucode_type { /* * enumeration of ucode section. - * This enumeration is used for legacy tlv style (before 16.0 uCode). + * This enumeration is used directly for older firmware (before 16.0). + * For new firmware, there can be up to 4 sections (see below) but the + * first one packaged into the firmware file is the DATA section and + * some debugging code accesses that. 
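(Illustration of the debug user mentioned in the comment above, taken from the D3 SRAM capture added later in this diff: it indexes the DATA section of the firmware image directly, which is why the reordering just below makes IWL_UCODE_SECTION_DATA index 0.)

const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;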
*/ enum iwl_ucode_sec { - IWL_UCODE_SECTION_INST, IWL_UCODE_SECTION_DATA, + IWL_UCODE_SECTION_INST, }; /* * For 16.0 uCode and above, there is no differentiation between sections, diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h index d6f6c37c09fd..d4ad505b0a4b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h @@ -115,10 +115,13 @@ struct iwl_mod_params { int led_mode; bool power_save; int power_level; +#ifdef CONFIG_IWLWIFI_DEBUG u32 debug_level; +#endif int ant_coupling; bool bt_ch_announce; bool auto_agg; + char *nvm_file; }; #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 6199a0a597a6..acd2665afb8c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -89,6 +89,7 @@ enum nvm_sku_bits { NVM_SKU_CAP_BAND_24GHZ = BIT(0), NVM_SKU_CAP_BAND_52GHZ = BIT(1), NVM_SKU_CAP_11N_ENABLE = BIT(2), + NVM_SKU_CAP_11AC_ENABLE = BIT(3), }; /* radio config bits (actual values from NVM definition) */ @@ -258,8 +259,6 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_vht_cap *vht_cap) { - /* For now, assume new devices with NVM are VHT capable */ - vht_cap->vht_supported = true; vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 | @@ -292,7 +291,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, } static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, const __le16 *nvm_sw) + struct iwl_nvm_data *data, const __le16 *nvm_sw, + bool enable_vht, u8 tx_chains, u8 rx_chains) { int n_channels = iwl_init_channel_map(dev, cfg, data, &nvm_sw[NVM_CHANNELS]); @@ -305,7 +305,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, IEEE80211_BAND_2GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, + tx_chains, rx_chains); sband = &data->bands[IEEE80211_BAND_5GHZ]; sband->band = IEEE80211_BAND_5GHZ; @@ -313,8 +314,10 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, IEEE80211_BAND_5GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ); - iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, + tx_chains, rx_chains); + if (enable_vht) + iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap); if (n_channels != n_used) IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", @@ -324,7 +327,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data * iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, const __le16 *nvm_hw, const __le16 *nvm_sw, - const __le16 *nvm_calib) + const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains) { struct iwl_nvm_data *data; u8 hw_addr[ETH_ALEN]; @@ -380,7 +383,8 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, data->hw_addr[4] = hw_addr[5]; data->hw_addr[5] = hw_addr[4]; - iwl_init_sbands(dev, cfg, data, nvm_sw); + iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE, + tx_chains, rx_chains); data->calib_version = 
255; /* TODO: this value will prevent some checks from diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h index e57fb989661e..3325059c52d4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h @@ -75,6 +75,6 @@ struct iwl_nvm_data * iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, const __le16 *nvm_hw, const __le16 *nvm_sw, - const __le16 *nvm_calib); + const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains); #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c index 25745daa0d5d..1a405ae6a9c5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c +++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c @@ -92,20 +92,16 @@ struct iwl_phy_db_entry { struct iwl_phy_db { struct iwl_phy_db_entry cfg; struct iwl_phy_db_entry calib_nch; - struct iwl_phy_db_entry calib_ch; struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS]; struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS]; - u32 channel_num; - u32 channel_size; - struct iwl_trans *trans; }; enum iwl_phy_db_section_type { IWL_PHY_DB_CFG = 1, IWL_PHY_DB_CALIB_NCH, - IWL_PHY_DB_CALIB_CH, + IWL_PHY_DB_UNUSED, IWL_PHY_DB_CALIB_CHG_PAPD, IWL_PHY_DB_CALIB_CHG_TXP, IWL_PHY_DB_MAX @@ -169,8 +165,6 @@ iwl_phy_db_get_section(struct iwl_phy_db *phy_db, return &phy_db->cfg; case IWL_PHY_DB_CALIB_NCH: return &phy_db->calib_nch; - case IWL_PHY_DB_CALIB_CH: - return &phy_db->calib_ch; case IWL_PHY_DB_CALIB_CHG_PAPD: if (chg_id >= IWL_NUM_PAPD_CH_GROUPS) return NULL; @@ -208,7 +202,6 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db) iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0); iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0); - iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0); for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++) iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i); for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) @@ -248,13 +241,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, entry->size = size; - if (type == IWL_PHY_DB_CALIB_CH) { - phy_db->channel_num = - le32_to_cpup((__le32 *)phy_db_notif->data); - phy_db->channel_size = - (size - CHANNEL_NUM_SIZE) / phy_db->channel_num; - } - IWL_DEBUG_INFO(phy_db->trans, "%s(%d): [PHYDB]SET: Type %d , Size: %d\n", __func__, __LINE__, type, size); @@ -328,10 +314,7 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, u32 type, u8 **data, u16 *size, u16 ch_id) { struct iwl_phy_db_entry *entry; - u32 channel_num; - u32 channel_size; u16 ch_group_id = 0; - u16 index; if (!phy_db) return -EINVAL; @@ -346,21 +329,8 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, if (!entry) return -EINVAL; - if (type == IWL_PHY_DB_CALIB_CH) { - index = ch_id_to_ch_index(ch_id); - channel_num = phy_db->channel_num; - channel_size = phy_db->channel_size; - if (index >= channel_num) { - IWL_ERR(phy_db->trans, "Wrong channel number %d\n", - ch_id); - return -EINVAL; - } - *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size; - *size = channel_size; - } else { - *data = entry->data; - *size = entry->size; - } + *data = entry->data; + *size = entry->size; IWL_DEBUG_INFO(phy_db->trans, "%s(%d): [PHYDB] GET: Type %d , Size: %d\n", @@ -413,6 +383,9 @@ static int iwl_phy_db_send_all_channel_groups( if (!entry) return -EINVAL; + if (WARN_ON_ONCE(!entry->size)) + continue; + /* Send the requested PHY DB section */ err = 
iwl_send_phy_db_cmd(phy_db, type, diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 386f2a7c87cb..ff8cc75c189d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -100,6 +100,18 @@ /* Device system time */ #define DEVICE_SYSTEM_TIME_REG 0xA0206C +/***************************************************************************** + * 7000/3000 series SHR DTS addresses * + *****************************************************************************/ + +#define SHR_MISC_WFM_DTS_EN (0x00a10024) +#define DTSC_CFG_MODE (0x00a10604) +#define DTSC_VREF_AVG (0x00a10648) +#define DTSC_VREF5_AVG (0x00a1064c) +#define DTSC_CFG_MODE_PERIODIC (0x2) +#define DTSC_PTAT_AVG (0x00a10650) + + /** * Tx Scheduler * diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 7a13790b5bfe..be4b2ac3dbbf 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -189,7 +189,8 @@ enum CMD_MODE { CMD_SYNC = 0, CMD_ASYNC = BIT(0), CMD_WANT_SKB = BIT(1), - CMD_ON_DEMAND = BIT(2), + CMD_SEND_IN_RFKILL = BIT(2), + CMD_ON_DEMAND = BIT(3), }; #define DEF_CMD_PAYLOAD_SIZE 320 @@ -427,8 +428,9 @@ struct iwl_trans_ops { void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); void (*stop_device)(struct iwl_trans *trans); - void (*d3_suspend)(struct iwl_trans *trans); - int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status); + void (*d3_suspend)(struct iwl_trans *trans, bool test); + int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, + bool test); int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); @@ -455,7 +457,7 @@ struct iwl_trans_ops { int (*read_mem)(struct iwl_trans *trans, u32 addr, void *buf, int dwords); int (*write_mem)(struct iwl_trans *trans, u32 addr, - void *buf, int dwords); + const void *buf, int dwords); void (*configure)(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg); void (*set_pmi)(struct iwl_trans *trans, bool state); @@ -587,17 +589,18 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans) trans->state = IWL_TRANS_NO_FW; } -static inline void iwl_trans_d3_suspend(struct iwl_trans *trans) +static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) { might_sleep(); - trans->ops->d3_suspend(trans); + trans->ops->d3_suspend(trans, test); } static inline int iwl_trans_d3_resume(struct iwl_trans *trans, - enum iwl_d3_status *status) + enum iwl_d3_status *status, + bool test) { might_sleep(); - return trans->ops->d3_resume(trans, status); + return trans->ops->d3_resume(trans, status, test); } static inline int iwl_trans_send_cmd(struct iwl_trans *trans, @@ -761,7 +764,7 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) } static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) + const void *buf, int dwords) { return trans->ops->write_mem(trans, addr, buf, dwords); } diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile index 2acc44b40986..ff856e543ae8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/iwlwifi/mvm/Makefile @@ -3,7 +3,7 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o iwlmvm-y += scan.o time-event.o rs.o iwlmvm-y += power.o bt-coex.o -iwlmvm-y += led.o +iwlmvm-y += led.o tt.o 
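(Minimal usage sketch for the CMD_MODE change in iwl-trans.h above, with an illustrative command id: OR-ing the new CMD_SEND_IN_RFKILL bit into a host command's flags asks the transport to deliver it to the firmware even while RF-kill is asserted.)

struct iwl_host_cmd cmd = {
	.id = D3_CONFIG_CMD,	/* illustrative; any command id works */
	.flags = CMD_SYNC | CMD_SEND_IN_RFKILL,
};
int ret = iwl_trans_send_cmd(trans, &cmd);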
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o iwlmvm-$(CONFIG_PM_SLEEP) += d3.o diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 810bfa5f6de0..9a4d94a1f90d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c @@ -174,7 +174,7 @@ static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = { static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = { cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaeaaaaaa), + cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xcc00ff28), cpu_to_le32(0x0000aaaa), @@ -351,6 +351,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, enum ieee80211_band band; int ave_rssi; + lockdep_assert_held(&mvm->mutex); if (vif->type != NL80211_IFTYPE_STATION) return; @@ -365,7 +366,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, smps_mode = IEEE80211_SMPS_AUTOMATIC; if (band != IEEE80211_BAND_2GHZ) { - ieee80211_request_smps(vif, smps_mode); + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, + smps_mode); return; } @@ -380,7 +382,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, mvmvif->id, data->notif->bt_status, data->notif->bt_traffic_load, smps_mode); - ieee80211_request_smps(vif, smps_mode); + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode); /* don't reduce the Tx power if in loose scheme */ if (is_loose_coex()) diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 16bbdcc8627a..8c49db02c9c1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -63,6 +63,7 @@ #include <linux/etherdevice.h> #include <linux/ip.h> +#include <linux/fs.h> #include <net/cfg80211.h> #include <net/ipv6.h> #include <net/tcp.h> @@ -419,8 +420,7 @@ static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr) return cpu_to_le16(be16_to_cpu((__force __be16)check)); } -static void iwl_mvm_build_tcp_packet(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, +static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif, struct cfg80211_wowlan_tcp *tcp, void *_pkt, u8 *mask, __le16 *pseudo_hdr_csum, @@ -566,21 +566,21 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, /* SYN (TX) */ iwl_mvm_build_tcp_packet( - mvm, vif, tcp, cfg->syn_tx.data, NULL, + vif, tcp, cfg->syn_tx.data, NULL, &cfg->syn_tx.info.tcp_pseudo_header_checksum, MVM_TCP_TX_SYN); cfg->syn_tx.info.tcp_payload_length = 0; /* SYN/ACK (RX) */ iwl_mvm_build_tcp_packet( - mvm, vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask, + vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask, &cfg->synack_rx.info.tcp_pseudo_header_checksum, MVM_TCP_RX_SYNACK); cfg->synack_rx.info.tcp_payload_length = 0; /* KEEPALIVE/ACK (TX) */ iwl_mvm_build_tcp_packet( - mvm, vif, tcp, cfg->keepalive_tx.data, NULL, + vif, tcp, cfg->keepalive_tx.data, NULL, &cfg->keepalive_tx.info.tcp_pseudo_header_checksum, MVM_TCP_TX_DATA); cfg->keepalive_tx.info.tcp_payload_length = @@ -604,7 +604,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, /* ACK (RX) */ iwl_mvm_build_tcp_packet( - mvm, vif, tcp, cfg->keepalive_ack_rx.data, + vif, tcp, cfg->keepalive_ack_rx.data, cfg->keepalive_ack_rx.rx_mask, &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum, MVM_TCP_RX_ACK); @@ -612,7 +612,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, /* WAKEUP (RX) */ iwl_mvm_build_tcp_packet( - mvm, vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask, + 
vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask, &cfg->wake_rx.info.tcp_pseudo_header_checksum, MVM_TCP_RX_WAKE); cfg->wake_rx.info.tcp_payload_length = @@ -620,7 +620,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, /* FIN */ iwl_mvm_build_tcp_packet( - mvm, vif, tcp, cfg->fin_tx.data, NULL, + vif, tcp, cfg->fin_tx.data, NULL, &cfg->fin_tx.info.tcp_pseudo_header_checksum, MVM_TCP_TX_FIN); cfg->fin_tx.info.tcp_payload_length = 0; @@ -756,7 +756,9 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return 0; } -int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +static int __iwl_mvm_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan, + bool test) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_d3_iter_data suspend_iter_data = { @@ -769,7 +771,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; - struct iwl_d3_manager_config d3_cfg_cmd = { + struct iwl_d3_manager_config d3_cfg_cmd_data = { /* * Program the minimum sleep time to 10 seconds, as many * platforms have issues processing a wakeup signal while @@ -777,17 +779,30 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) */ .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), }; + struct iwl_host_cmd d3_cfg_cmd = { + .id = D3_CONFIG_CMD, + .flags = CMD_SYNC | CMD_WANT_SKB, + .data[0] = &d3_cfg_cmd_data, + .len[0] = sizeof(d3_cfg_cmd_data), + }; struct wowlan_key_data key_data = { .use_rsc_tsc = false, .tkip = &tkip_cmd, .use_tkip = false, }; int ret, i; + int len __maybe_unused; u16 seq; u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT; - if (WARN_ON(!wowlan)) + if (!wowlan) { + /* + * mac80211 shouldn't get here, but for D3 test + * it doesn't warrant a warning + */ + WARN_ON(!test); return -EINVAL; + } key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); if (!key_data.rsc_tsc) @@ -1007,15 +1022,31 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) if (ret) goto out; + ret = iwl_mvm_power_update_mode(mvm, vif); + if (ret) + goto out; + /* must be last -- this switches firmware state */ - ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC, - sizeof(d3_cfg_cmd), &d3_cfg_cmd); + ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); if (ret) goto out; +#ifdef CONFIG_IWLWIFI_DEBUGFS + len = le32_to_cpu(d3_cfg_cmd.resp_pkt->len_n_flags) & + FH_RSCSR_FRAME_SIZE_MSK; + if (len >= sizeof(u32) * 2) { + mvm->d3_test_pme_ptr = + le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data); + } else if (test) { + /* in test mode we require the pointer */ + ret = -EIO; + goto out; + } +#endif + iwl_free_resp(&d3_cfg_cmd); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - iwl_trans_d3_suspend(mvm->trans); + iwl_trans_d3_suspend(mvm->trans, test); out: mvm->aux_sta.sta_id = old_aux_sta_id; mvm_ap_sta->sta_id = old_ap_sta_id; @@ -1030,6 +1061,11 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) return ret; } +int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +{ + return __iwl_mvm_suspend(hw, wowlan, false); +} + static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -1214,9 +1250,28 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, iwl_free_resp(&cmd); } -int iwl_mvm_resume(struct ieee80211_hw 
*hw) +static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm) +{ +#ifdef CONFIG_IWLWIFI_DEBUGFS + const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN]; + u32 len = img->sec[IWL_UCODE_SECTION_DATA].len; + u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset; + + if (!mvm->store_d3_resume_sram) + return; + + if (!mvm->d3_resume_sram) { + mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL); + if (!mvm->d3_resume_sram) + return; + } + + iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len); +#endif +} + +static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) { - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_d3_iter_data resume_iter_data = { .mvm = mvm, }; @@ -1236,7 +1291,7 @@ int iwl_mvm_resume(struct ieee80211_hw *hw) vif = resume_iter_data.vif; - ret = iwl_trans_d3_resume(mvm->trans, &d3_status); + ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); if (ret) goto out_unlock; @@ -1245,12 +1300,15 @@ int iwl_mvm_resume(struct ieee80211_hw *hw) goto out_unlock; } + /* query SRAM first in case we want event logging */ + iwl_mvm_read_d3_sram(mvm); + iwl_mvm_query_wakeup_reasons(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); - if (vif) + if (!test && vif) ieee80211_resume_disconnect(vif); /* return 1 to reconfigure the device */ @@ -1258,9 +1316,106 @@ int iwl_mvm_resume(struct ieee80211_hw *hw) return 1; } +int iwl_mvm_resume(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + return __iwl_mvm_resume(mvm, false); +} + void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); device_set_wakeup_enable(mvm->trans->dev, enabled); } + +#ifdef CONFIG_IWLWIFI_DEBUGFS +static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) +{ + struct iwl_mvm *mvm = inode->i_private; + int err; + + if (mvm->d3_test_active) + return -EBUSY; + + file->private_data = inode->i_private; + + ieee80211_stop_queues(mvm->hw); + synchronize_net(); + + /* start pseudo D3 */ + rtnl_lock(); + err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); + rtnl_unlock(); + if (err > 0) + err = -EINVAL; + if (err) { + ieee80211_wake_queues(mvm->hw); + return err; + } + mvm->d3_test_active = true; + return 0; +} + +static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + u32 pme_asserted; + + while (true) { + pme_asserted = iwl_trans_read_mem32(mvm->trans, + mvm->d3_test_pme_ptr); + if (pme_asserted) + break; + if (msleep_interruptible(100)) + break; + } + + return 0; +} + +static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + if (vif->type == NL80211_IFTYPE_STATION) + ieee80211_connection_loss(vif); +} + +static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) +{ + struct iwl_mvm *mvm = inode->i_private; + int remaining_time = 10; + + mvm->d3_test_active = false; + __iwl_mvm_resume(mvm, true); + iwl_abort_notification_waits(&mvm->notif_wait); + ieee80211_restart_hw(mvm->hw); + + /* wait for restart and disconnect all interfaces */ + while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + remaining_time > 0) { + remaining_time--; + msleep(1000); + } + + if (remaining_time == 0) + IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n"); + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_d3_test_disconn_work_iter, NULL); + + ieee80211_wake_queues(mvm->hw); + + return 
0; +} + +const struct file_operations iwl_dbgfs_d3_test_ops = { + .llseek = no_llseek, + .open = iwl_mvm_d3_test_open, + .read = iwl_mvm_d3_test_read, + .release = iwl_mvm_d3_test_release, +}; +#endif diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 2053dccefcd6..b7643c16201f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -145,15 +145,18 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, char *buf; u8 *ptr; + if (!mvm->ucode_loaded) + return -EINVAL; + /* default is to dump the entire data segment */ if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) { - mvm->dbgfs_sram_offset = 0x800000; - if (!mvm->ucode_loaded) - return -EINVAL; img = &mvm->fw->img[mvm->cur_ucode]; - mvm->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; + ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; + len = img->sec[IWL_UCODE_SECTION_DATA].len; + } else { + ofs = mvm->dbgfs_sram_offset; + len = mvm->dbgfs_sram_len; } - len = mvm->dbgfs_sram_len; bufsz = len * 4 + 256; buf = kzalloc(bufsz, GFP_KERNEL); @@ -167,12 +170,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, } pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len); - pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", - mvm->dbgfs_sram_offset); + pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", ofs); - iwl_trans_read_mem_bytes(mvm->trans, - mvm->dbgfs_sram_offset, - ptr, len); + iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len); for (ofs = 0; ofs < len; ofs += 16) { pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs); hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos, @@ -300,6 +300,146 @@ static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file, return count; } +static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_dbgfs_pm_mask param, int val) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm; + + dbgfs_pm->mask |= param; + + switch (param) { + case MVM_DEBUGFS_PM_KEEP_ALIVE: { + struct ieee80211_hw *hw = mvm->hw; + int dtimper = hw->conf.ps_dtim_period ?: 1; + int dtimper_msec = dtimper * vif->bss_conf.beacon_int; + + IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); + if (val * MSEC_PER_SEC < 3 * dtimper_msec) { + IWL_WARN(mvm, + "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n", + val * MSEC_PER_SEC, 3 * dtimper_msec); + } + dbgfs_pm->keep_alive_seconds = val; + break; + } + case MVM_DEBUGFS_PM_SKIP_OVER_DTIM: + IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n", + val ? 
"enabled" : "disabled"); + dbgfs_pm->skip_over_dtim = val; + break; + case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS: + IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val); + dbgfs_pm->skip_dtim_periods = val; + break; + case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT: + IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val); + dbgfs_pm->rx_data_timeout = val; + break; + case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT: + IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val); + dbgfs_pm->tx_data_timeout = val; + break; + case MVM_DEBUGFS_PM_DISABLE_POWER_OFF: + IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val); + dbgfs_pm->disable_power_off = val; + break; + } +} + +static ssize_t iwl_dbgfs_pm_params_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->dbgfs_data; + enum iwl_dbgfs_pm_mask param; + char buf[32] = {}; + int val; + int ret; + + if (copy_from_user(buf, user_buf, sizeof(buf))) + return -EFAULT; + + if (!strncmp("keep_alive=", buf, 11)) { + if (sscanf(buf + 11, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_KEEP_ALIVE; + } else if (!strncmp("skip_over_dtim=", buf, 15)) { + if (sscanf(buf + 15, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM; + } else if (!strncmp("skip_dtim_periods=", buf, 18)) { + if (sscanf(buf + 18, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS; + } else if (!strncmp("rx_data_timeout=", buf, 16)) { + if (sscanf(buf + 16, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT; + } else if (!strncmp("tx_data_timeout=", buf, 16)) { + if (sscanf(buf + 16, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT; + } else if (!strncmp("disable_power_off=", buf, 18)) { + if (sscanf(buf + 18, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF; + } else { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + iwl_dbgfs_update_pm(mvm, vif, param, val); + ret = iwl_mvm_power_update_mode(mvm, vif); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_pm_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->dbgfs_data; + struct iwl_powertable_cmd cmd = {}; + char buf[256]; + int bufsz = sizeof(buf); + int pos = 0; + + iwl_mvm_power_build_cmd(mvm, vif, &cmd); + + pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ? + 0 : 1); + pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", + le32_to_cpu(cmd.skip_dtim_periods)); + pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", + iwlmvm_mod_params.power_scheme); + pos += scnprintf(buf+pos, bufsz-pos, "flags = %d\n", + le16_to_cpu(cmd.flags)); + pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n", + cmd.keep_alive_seconds); + + if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { + pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 
+ 1 : 0); + pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", + le32_to_cpu(cmd.rx_data_timeout)); + pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", + le32_to_cpu(cmd.tx_data_timeout)); + } + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + static ssize_t iwl_dbgfs_mac_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -481,6 +621,255 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, return count; } +static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, + enum iwl_dbgfs_bf_mask param, int value) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; + + dbgfs_bf->mask |= param; + + switch (param) { + case MVM_DEBUGFS_BF_ENERGY_DELTA: + dbgfs_bf->bf_energy_delta = value; + break; + case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA: + dbgfs_bf->bf_roaming_energy_delta = value; + break; + case MVM_DEBUGFS_BF_ROAMING_STATE: + dbgfs_bf->bf_roaming_state = value; + break; + case MVM_DEBUGFS_BF_TEMPERATURE_DELTA: + dbgfs_bf->bf_temperature_delta = value; + break; + case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER: + dbgfs_bf->bf_enable_beacon_filter = value; + break; + case MVM_DEBUGFS_BF_DEBUG_FLAG: + dbgfs_bf->bf_debug_flag = value; + break; + case MVM_DEBUGFS_BF_ESCAPE_TIMER: + dbgfs_bf->bf_escape_timer = value; + break; + case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT: + dbgfs_bf->ba_enable_beacon_abort = value; + break; + case MVM_DEBUGFS_BA_ESCAPE_TIMER: + dbgfs_bf->ba_escape_timer = value; + break; + } +} + +static ssize_t iwl_dbgfs_bf_params_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->dbgfs_data; + enum iwl_dbgfs_bf_mask param; + char buf[256]; + int buf_size; + int value; + int ret = 0; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (!strncmp("bf_energy_delta=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ENERGY_DELTA_MIN || + value > IWL_BF_ENERGY_DELTA_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ENERGY_DELTA; + } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) { + if (sscanf(buf+24, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN || + value > IWL_BF_ROAMING_ENERGY_DELTA_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA; + } else if (!strncmp("bf_roaming_state=", buf, 17)) { + if (sscanf(buf+17, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ROAMING_STATE_MIN || + value > IWL_BF_ROAMING_STATE_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ROAMING_STATE; + } else if (!strncmp("bf_temperature_delta=", buf, 21)) { + if (sscanf(buf+21, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_TEMPERATURE_DELTA_MIN || + value > IWL_BF_TEMPERATURE_DELTA_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA; + } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) { + if (sscanf(buf+24, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER; + } else if (!strncmp("bf_debug_flag=", buf, 14)) { + if (sscanf(buf+14, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BF_DEBUG_FLAG; + } else if 
(!strncmp("bf_escape_timer=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ESCAPE_TIMER_MIN || + value > IWL_BF_ESCAPE_TIMER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ESCAPE_TIMER; + } else if (!strncmp("ba_escape_timer=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BA_ESCAPE_TIMER_MIN || + value > IWL_BA_ESCAPE_TIMER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BA_ESCAPE_TIMER; + } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) { + if (sscanf(buf+23, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT; + } else { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + iwl_dbgfs_update_bf(vif, param, value); + if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) { + ret = iwl_mvm_disable_beacon_filter(mvm, vif); + } else { + if (mvmvif->bf_enabled) + ret = iwl_mvm_enable_beacon_filter(mvm, vif); + else + ret = iwl_mvm_disable_beacon_filter(mvm, vif); + } + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_bf_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_beacon_filter_cmd cmd = { + .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, + .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, + .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, + .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, + .bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT, + .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, + .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), + .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT), + .ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT, + }; + + iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); + if (mvmvif->bf_enabled) + cmd.bf_enable_beacon_filter = 1; + else + cmd.bf_enable_beacon_filter = 0; + + pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n", + cmd.bf_energy_delta); + pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n", + cmd.bf_roaming_energy_delta); + pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n", + cmd.bf_roaming_state); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n", + cmd.bf_temperature_delta); + pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n", + cmd.bf_enable_beacon_filter); + pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n", + cmd.bf_debug_flag); + pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n", + cmd.bf_escape_timer); + pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n", + cmd.ba_escape_timer); + pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n", + cmd.ba_enable_beacon_abort); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +#ifdef CONFIG_PM_SLEEP +static ssize_t iwl_dbgfs_d3_sram_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[8] = {}; + int store; + + if (copy_from_user(buf, user_buf, sizeof(buf))) + return -EFAULT; + + if (sscanf(buf, "%d", &store) != 1) + return -EINVAL; + + mvm->store_d3_resume_sram = store; + + return count; +} + +static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, 
char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + const struct fw_img *img; + int ofs, len, pos = 0; + size_t bufsz, ret; + char *buf; + u8 *ptr = mvm->d3_resume_sram; + + img = &mvm->fw->img[IWL_UCODE_WOWLAN]; + len = img->sec[IWL_UCODE_SECTION_DATA].len; + + bufsz = len * 4 + 256; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n", + mvm->store_d3_resume_sram ? "en" : "dis"); + + if (ptr) { + for (ofs = 0; ofs < len; ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos, + bufsz - pos, false); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "(no data captured)\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + + kfree(buf); + + return ret; +} +#endif + #define MVM_DEBUGFS_READ_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ @@ -524,9 +913,14 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow); MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow); MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart); +#ifdef CONFIG_PM_SLEEP +MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram); +#endif /* Interface specific debugfs entries */ MVM_DEBUGFS_READ_FILE_OPS(mac_params); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params); int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) { @@ -542,6 +936,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); +#ifdef CONFIG_PM_SLEEP + MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR); +#endif /* * Create a symlink with mac80211. It will be removed when mac80211 @@ -577,9 +975,19 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return; } + if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p) + MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR | + S_IRUSR); + MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR); + if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + mvmvif == mvm->bf_allowed_vif) + MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + /* * Create symlink for convenience pointing to interface specific * debugfs entries for the driver. For example, under diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h index 51e015d1dfb2..6f8b2c16ae17 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h @@ -75,13 +75,15 @@ enum iwl_d3_wakeup_flags { * struct iwl_d3_manager_config - D3 manager configuration command * @min_sleep_time: minimum sleep time (in usec) * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags + * @wakeup_host_timer: force wakeup after this many seconds * * The structure is used for the D3_CONFIG_CMD command. 
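+ * (e.g. wakeup_host_timer = 10 forces a wakeup after 10 seconds in D3)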
*/ struct iwl_d3_manager_config { __le32 min_sleep_time; __le32 wakeup_flags; -} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */ + __le32 wakeup_host_timer; +} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */ /* TODO: OFFLOADS_QUERY_API_S_VER_1 */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h index d68640ea41d4..98b1feb43d38 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h @@ -71,7 +71,13 @@ #define MAC_INDEX_MIN_DRIVER 0 #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX -#define AC_NUM 4 /* Number of access categories */ +enum iwl_ac { + AC_BK, + AC_BE, + AC_VI, + AC_VO, + AC_NUM, +}; /** * enum iwl_mac_protection_flags - MAC context flags diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index 81fe45f46be7..d8e19290b0f3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -101,20 +101,107 @@ enum iwl_power_flags { * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to * PSM transition - legacy PM * @sleep_interval: not in use - * @keep_alive_beacons: not in use + * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag + * is set. For example, if it is required to skip over + * one DTIM, this value needs to be set to 2 (DTIM periods). * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. * Default: 80dBm */ struct iwl_powertable_cmd { - /* PM_POWER_TABLE_CMD_API_S_VER_5 */ + /* PM_POWER_TABLE_CMD_API_S_VER_6 */ __le16 flags; u8 keep_alive_seconds; u8 debug_flags; __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 sleep_interval[IWL_POWER_VEC_SIZE]; - __le32 keep_alive_beacons; + __le32 skip_dtim_periods; __le32 lprx_rssi_threshold; } __packed; +/** + * struct iwl_beacon_filter_cmd + * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) + * @id_and_color: MAC context identifier + * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon + * to driver if delta in Energy values calculated for this and last + * passed beacon is greater than this threshold. Zero value means that + * the Energy change is ignored for beacon filtering, and beacon will + * not be forced to be sent to driver regardless of this delta. Typical + * energy delta 5dB. + * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state. + * Send beacon to driver if delta in Energy values calculated for this + * and last passed beacon is greater than this threshold. Zero value + * means that the Energy change is ignored for beacon filtering while in + * Roaming state, typical energy delta 1dB. + * @bf_roaming_state: Used for RSSI filtering. If the absolute Energy value + * calculated for the current beacon is less than the threshold, use + * Roaming Energy Delta Threshold, otherwise use normal Energy Delta + * Threshold. Typical energy threshold is -72dBm. + * @bf_temperature_delta: Send beacon to driver if delta in temperature values + * calculated for this and the last passed beacon is greater than this + * threshold. Zero value means that the temperature change is ignored for + * beacon filtering; beacons will not be forced to be sent to driver + * regardless of whether their temperature has changed. + * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled. + * @bf_escape_timer: Send beacons to the driver if no beacons were passed + * for a specific period of time. Units: Beacons. 
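+ * For example, with the default bf_escape_timer of 50, at least one beacon in every 50 is passed up to the driver even if no other filter condition triggers.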
+ * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed + * for a longer period of time than this escape timeout. Units: Beacons. + * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. + */ +struct iwl_beacon_filter_cmd { + u8 bf_energy_delta; + u8 bf_roaming_energy_delta; + u8 bf_roaming_state; + u8 bf_temperature_delta; + u8 bf_enable_beacon_filter; + u8 bf_debug_flag; + __le16 reserved1; + __le32 bf_escape_timer; + __le32 ba_escape_timer; + u8 ba_enable_beacon_abort; + u8 reserved2[3]; +} __packed; + +/* Beacon filtering and beacon abort */ +#define IWL_BF_ENERGY_DELTA_DEFAULT 5 +#define IWL_BF_ENERGY_DELTA_MAX 255 +#define IWL_BF_ENERGY_DELTA_MIN 0 + +#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1 +#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255 +#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0 + +#define IWL_BF_ROAMING_STATE_DEFAULT 72 +#define IWL_BF_ROAMING_STATE_MAX 255 +#define IWL_BF_ROAMING_STATE_MIN 0 + +#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5 +#define IWL_BF_TEMPERATURE_DELTA_MAX 255 +#define IWL_BF_TEMPERATURE_DELTA_MIN 0 + +#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 + +#define IWL_BF_DEBUG_FLAG_DEFAULT 0 + +#define IWL_BF_ESCAPE_TIMER_DEFAULT 50 +#define IWL_BF_ESCAPE_TIMER_MAX 1024 +#define IWL_BF_ESCAPE_TIMER_MIN 0 + +#define IWL_BA_ESCAPE_TIMER_DEFAULT 3 +#define IWL_BA_ESCAPE_TIMER_MAX 1024 +#define IWL_BA_ESCAPE_TIMER_MIN 0 + +#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 + +#define IWL_BF_CMD_CONFIG_DEFAULTS \ + .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, \ + .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \ + .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, \ + .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, \ + .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, \ + .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \ + .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT) + #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h index 007a93b25bd7..700cce731770 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h @@ -134,6 +134,7 @@ enum iwl_tx_flags { #define TX_CMD_SEC_WEP 0x01 #define TX_CMD_SEC_CCM 0x02 #define TX_CMD_SEC_TKIP 0x03 +#define TX_CMD_SEC_MSK 0x07 #define TX_CMD_SEC_WEP_KEY_IDX_POS 6 #define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 #define TX_CMD_SEC_KEY128 0x08 @@ -227,10 +228,11 @@ struct iwl_tx_cmd { __le16 len; __le16 next_frame_len; __le32 tx_flags; - /* DRAM_SCRATCH_API_U_VER_1 */ - u8 try_cnt; - u8 btkill_cnt; - __le16 reserved; + struct { + u8 try_cnt; + u8 btkill_cnt; + __le16 reserved; + } scratch; /* DRAM_SCRATCH_API_U_VER_1 */ __le32 rate_n_flags; u8 sta_id; u8 sec_ctl; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index c6384555aab4..cbfb3beae783 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -139,6 +139,9 @@ enum { /* Power */ POWER_TABLE_CMD = 0x77, + /* Thermal Throttling */ + REPLY_THERMAL_MNG_BACKOFF = 0x7e, + /* Scanning */ SCAN_REQUEST_CMD = 0x80, SCAN_ABORT_CMD = 0x81, @@ -161,6 +164,8 @@ enum { CARD_STATE_CMD = 0xa0, CARD_STATE_NOTIFICATION = 0xa1, + MISSED_BEACONS_NOTIFICATION = 0xa2, + REPLY_RX_PHY_CMD = 0xc0, REPLY_RX_MPDU_CMD = 0xc1, BA_NOTIF = 0xc5, @@ -170,6 +175,8 @@ enum { BT_COEX_PROT_ENV = 0xcd, BT_PROFILE_NOTIFICATION = 0xce, + REPLY_BEACON_FILTERING_CMD = 0xd2, + REPLY_DEBUG_CMD = 0xf0, DEBUG_LOG_MSG = 0xf7, @@ -938,6 +945,24 @@ struct 
iwl_card_state_notif { } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ /** + * struct iwl_missed_beacons_notif - information on missed beacons + * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) + * @mac_id: interface ID + * @consec_missed_beacons_since_last_rx: number of consecutive missed + * beacons since last RX. + * @consec_missed_beacons: number of consecutive missed beacons + * @num_expected_beacons: number of expected beacons + * @num_recvd_beacons: number of received beacons + */ +struct iwl_missed_beacons_notif { + __le32 mac_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ + +/** * struct iwl_set_calib_default_cmd - set default value for calibration. * ( SET_CALIB_DEFAULT_CMD = 0x8e ) * @calib_index: the calibration to set value for @@ -975,4 +1000,212 @@ struct iwl_mcast_filter_cmd { u8 addr_list[0]; } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ +struct mvm_statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 wait_for_silence_timeout_cnt; + __le32 reserved[3]; +} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */ + +struct mvm_statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; + __le32 rssi_ant; + __le32 reserved2; +} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */ + +struct mvm_statistics_general_common { + __le32 temperature; /* radio temperature */ + __le32 temperature_m; /* radio voltage */ + struct mvm_statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct mvm_statistics_div div; + __le32 rx_enable_counter; + /* + * num_of_sos_states: + * count the number of times we have to re-tune + * in order to get out of bad PHY status + */ + __le32 num_of_sos_states; +} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */ + +struct mvm_statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with a BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ + __le32 channel_beacons; /* beacons with our bss id and in our + * serving channel */ + __le32 num_missed_bcon; /* number of missed beacons */ + __le32 adc_rx_saturation_time; /* count in 0.8us units the time the + * ADC was in saturation */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ + __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ + __le32 interference_data_flag; /* flag for interference data + * availability. 1 when data is + * available. 
*/ + __le32 channel_load; /* counts RX Enable time in uSec */ + __le32 dsp_false_alarms; /* DSP false alarm (both OFDM + * and CCK) counter */ + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; + __le32 num_bt_kills; + __le32 mac_id; + __le32 directed_data_mpdu; +} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */ + +struct mvm_statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved; +} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */ + +struct mvm_statistics_rx_ht_phy { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */ + +#define MAX_CHAINS 3 + +struct mvm_statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; + __s8 txpower[MAX_CHAINS]; + __s8 reserved; + __le32 reserved2; +} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */ + +struct mvm_statistics_tx_channel_width { + __le32 ext_cca_narrow_ch20[1]; + __le32 ext_cca_narrow_ch40[2]; + __le32 ext_cca_narrow_ch80[3]; + __le32 ext_cca_narrow_ch160[4]; + __le32 last_tx_ch_width_indx; + __le32 rx_detected_per_ch_width[4]; + __le32 success_per_ch_width[4]; + __le32 fail_per_ch_width[4]; +}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */ + +struct mvm_statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; + struct mvm_statistics_tx_non_phy_agg agg; + struct mvm_statistics_tx_channel_width channel_width; +} __packed; /* STATISTICS_TX_API_S_VER_4 */ + + +struct mvm_statistics_bt_activity { + __le32 hi_priority_tx_req_cnt; + __le32 hi_priority_tx_denied_cnt; + __le32 lo_priority_tx_req_cnt; + __le32 lo_priority_tx_denied_cnt; + __le32 hi_priority_rx_req_cnt; + __le32 hi_priority_rx_denied_cnt; + __le32 lo_priority_rx_req_cnt; + __le32 lo_priority_rx_denied_cnt; +} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ + +struct mvm_statistics_general { + struct mvm_statistics_general_common common; + __le32 beacon_filtered; + __le32 missed_beacons; + __s8 beacon_filter_everage_energy; + __s8 beacon_filter_reason; + __s8 beacon_filter_current_energy; + __s8 beacon_filter_reserved; + __le32 beacon_filter_delta_time; + struct mvm_statistics_bt_activity bt_activity; +} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */ + +struct mvm_statistics_rx { + struct mvm_statistics_rx_phy ofdm; + struct mvm_statistics_rx_phy cck; 
+ struct mvm_statistics_rx_non_phy general; + struct mvm_statistics_rx_ht_phy ofdm_ht; +} __packed; /* STATISTICS_RX_API_S_VER_3 */ + +/* + * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) + * + * By default, uCode issues this notification after receiving a beacon + * while associated. To disable this behavior, set DISABLE_NOTIF flag in the + * REPLY_STATISTICS_CMD 0x9c, above. + * + * Statistics counters continue to increment beacon after beacon, but are + * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD + * 0x9c with CLEAR_STATS bit set (see above). + * + * uCode also issues this notification during scans. uCode clears statistics + * appropriately so that each notification contains statistics for only the + * one channel that has just been scanned. + */ + +struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */ + __le32 flag; + struct mvm_statistics_rx rx; + struct mvm_statistics_tx tx; + struct mvm_statistics_general general; +} __packed; + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index e18c92dd60ec..cd7c0032cc58 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -326,6 +326,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); WARN_ON(ret); + /* + * Abort after reading the NVM in case RF Kill is on; we will complete + * the init sequence later, when RF kill is switched off. + */ + if (iwl_mvm_is_radio_killed(mvm)) { + IWL_DEBUG_RF_KILL(mvm, + "jump over all phy activities due to RF kill\n"); + iwl_remove_notification(&mvm->notif_wait, &calib_wait); + return 1; + } + /* Send TX valid antennas before triggering calibrations */ ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw)); if (ret) @@ -388,6 +399,8 @@ out: int iwl_mvm_up(struct iwl_mvm *mvm) { int ret, i; + struct ieee80211_channel *chan; + struct cfg80211_chan_def chandef; lockdep_assert_held(&mvm->mutex); @@ -400,8 +413,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_run_init_mvm_ucode(mvm, false); if (ret && !iwlmvm_mod_params.init_dbg) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); + /* this can't happen */ + if (WARN_ON(ret > 0)) + ret = -ERFKILL; goto error; } + /* stop and restart the HW since the INIT image has just been loaded */ + iwl_trans_stop_hw(mvm->trans, false); + ret = iwl_trans_start_hw(mvm->trans); + if (ret) + return ret; } if (iwlmvm_mod_params.init_dbg) @@ -443,8 +464,22 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; - IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); + /* Add all the PHY contexts */ + chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); + for (i = 0; i < NUM_PHY_CTX; i++) { + /* + * The channel used here isn't relevant as it's + * going to be overwritten in the other flows. + * For now use the first channel we have. 
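+ * (typically 2.4 GHz channel 1 on currently supported devices; any valid channel would do)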
+ */ + ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i], + &chandef, 1, 1); + if (ret) + goto error; + } + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: iwl_trans_stop_device(mvm->trans); diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index b2cc3d98e0f7..273b0cc197ab 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -193,14 +193,11 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 qmask, ac; + u32 qmask = 0, ac; if (vif->type == NL80211_IFTYPE_P2P_DEVICE) return BIT(IWL_MVM_OFFCHANNEL_QUEUE); - qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ? - BIT(vif->cab_queue) : 0; - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) qmask |= BIT(vif->hw_queue[ac]); @@ -227,7 +224,7 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, .found_vif = false, }; u32 ac; - int ret; + int ret, i; /* * Allocate a MAC ID and a TSF for this MAC, along with the queues @@ -335,6 +332,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT; mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) + mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; + return 0; exit_fail: @@ -362,7 +362,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) break; case NL80211_IFTYPE_AP: iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue, - IWL_MVM_TX_FIFO_VO); + IWL_MVM_TX_FIFO_MCAST); /* fall through */ default: for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) @@ -550,6 +550,10 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]); } + /* in AP mode, the MCAST FIFO takes the EDCA params from VO */ + if (vif->type == NL80211_IFTYPE_AP) + cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST); + if (vif->bss_conf.qos) cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); @@ -1047,3 +1051,28 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, rate); return 0; } + +static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + u16 *id = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (mvmvif->id == *id) + ieee80211_beacon_loss(vif); +} + +int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacons_notif *missed_beacons = (void *)pkt->data; + u16 id = (u16)le32_to_cpu(missed_beacons->mac_id); + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_beacon_loss_iterator, + &id); + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index a5eb8c82f16a..e08683b20531 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -81,12 +81,12 @@ static const struct ieee80211_iface_limit iwl_mvm_limits[] = { { .max = 1, - .types = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + .types = BIT(NL80211_IFTYPE_STATION), }, { .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | + .types = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), }, { @@ -127,6 +127,17 @@ static const 
struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = { }; #endif +static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) +{ + int i; + + memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); + for (i = 0; i < NUM_PHY_CTX; i++) { + mvm->phy_ctxts[i].id = i; + mvm->phy_ctxts[i].ref = 0; + } +} + int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; @@ -141,7 +152,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS | IEEE80211_HW_AMPDU_AGGREGATION | - IEEE80211_HW_TIMING_BEACON_ONLY; + IEEE80211_HW_TIMING_BEACON_ONLY | + IEEE80211_HW_CONNECTION_MONITOR; hw->queues = IWL_MVM_FIRST_AGG_QUEUE; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; @@ -158,7 +170,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); - hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt); + hw->chanctx_data_size = sizeof(u16); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | @@ -193,6 +205,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->n_addresses++; } + iwl_mvm_reset_phy_ctxts(mvm); + /* we create the 802.11 header and a max-length SSID element */ hw->wiphy->max_scan_ie_len = mvm->fw->ucode_capa.max_probe_length - 24 - 34; @@ -222,20 +236,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE; + mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; if (!iwlwifi_mod_params.sw_crypto) - hw->wiphy->wowlan.flags |= - WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_GTK_REKEY_FAILURE | - WIPHY_WOWLAN_4WAY_HANDSHAKE; - - hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; - hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; - hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; - hw->wiphy->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; + mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_4WAY_HANDSHAKE; + + mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; + mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; + mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; + mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; + hw->wiphy->wowlan = &mvm->wowlan; } #endif @@ -252,8 +266,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - if (test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status)) { - IWL_DEBUG_DROP(mvm, "Dropping - RF KILL\n"); + if (iwl_mvm_is_radio_killed(mvm)) { + IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); goto drop; } @@ -345,8 +359,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvmvif->phy_ctxt = NULL; + mvmvif->phy_ctxt = NULL; } static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) @@ -363,6 +376,9 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_cleanup_iterator, mvm); + mvm->p2p_device_vif = NULL; + + iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, 
sizeof(mvm->fw_key_table)); memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); @@ -456,6 +472,20 @@ static void iwl_mvm_power_update_iterator(void *data, u8 *mac, iwl_mvm_power_update_mode(mvm, vif); } +static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) +{ + u16 i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < NUM_PHY_CTX; i++) + if (!mvm->phy_ctxts[i].ref) + return &mvm->phy_ctxts[i]; + + IWL_ERR(mvm, "No available PHY context\n"); + return NULL; +} + static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -530,32 +560,34 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, */ iwl_mvm_power_update_mode(mvm, vif); + /* beacon filtering */ + if (!mvm->bf_allowed_vif && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p){ + mvm->bf_allowed_vif = mvmvif; + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + } + + ret = iwl_mvm_disable_beacon_filter(mvm, vif); + if (ret) + goto out_release; + /* * P2P_DEVICE interface does not have a channel context assigned to it, * so a dedicated PHY context is allocated to it and the corresponding * MAC context is bound to it at this stage. */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - struct ieee80211_channel *chan; - struct cfg80211_chan_def chandef; - mvmvif->phy_ctxt = &mvm->phy_ctxt_roc; - - /* - * The channel used here isn't relevant as it's - * going to be overwritten as part of the ROC flow. - * For now use the first channel we have. - */ - chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; - cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); - ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, - &chandef, 1, 1); - if (ret) + mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!mvmvif->phy_ctxt) { + ret = -ENOSPC; goto out_remove_mac; + } + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) - goto out_remove_phy; + goto out_unref_phy; ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta); if (ret) @@ -571,27 +603,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); - out_remove_phy: - iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt); + out_unref_phy: + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); out_remove_mac: mvmvif->phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); out_release: - /* - * TODO: remove this temporary code. - * Currently MVM FW supports power management only on single MAC. - * Check if only one additional interface remains after releasing - * current one. Update power mode on the remaining interface. - */ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; - IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n", - mvm->vif_count); - if (mvm->vif_count == 1) { - ieee80211_iterate_active_interfaces( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_power_update_iterator, mvm); - } + ieee80211_iterate_active_interfaces( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_update_iterator, mvm); iwl_mvm_mac_ctxt_release(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); @@ -629,8 +651,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, * By now, all the AC queues are empty. The AGG queues are * empty too. We already got all the Tx responses for all the * packets in the queues. The drain work can have been - * triggered. Flush it. This work item takes the mutex, so kill - * it before we take it. + * triggered. Flush it. 
*/ flush_work(&mvm->sta_drained_wk); } @@ -646,6 +667,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + if (mvm->bf_allowed_vif == mvmvif) { + mvm->bf_allowed_vif = NULL; + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + } + iwl_mvm_vif_dbgfs_clean(mvm, vif); /* @@ -661,7 +687,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvm->p2p_device_vif = NULL; iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta); iwl_mvm_binding_remove_vif(mvm, vif); - iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt); + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; } @@ -748,7 +774,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update quotas\n"); } - } else if (changes & BSS_CHANGED_DTIM_PERIOD) { + ret = iwl_mvm_power_update_mode(mvm, vif); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } else if (changes & BSS_CHANGED_BEACON_INFO) { /* * We received a beacon _after_ association so * remove the session protection. @@ -756,19 +785,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); } else if (changes & BSS_CHANGED_PS) { - /* - * TODO: remove this temporary code. - * Currently MVM FW supports power management only on single - * MAC. Avoid power mode update if more than one interface - * is active. - */ - IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n", - mvm->vif_count); - if (mvm->vif_count == 1) { - ret = iwl_mvm_power_update_mode(mvm, vif); - if (ret) - IWL_ERR(mvm, "failed to update power mode\n"); - } + ret = iwl_mvm_power_update_mode(mvm, vif); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); } } @@ -999,9 +1018,13 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, mvmvif->phy_ctxt->channel->band); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { + /* enable beacon filtering */ + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif)); ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { + /* disable beacon filtering */ + WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif)); ret = 0; } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { @@ -1167,29 +1190,107 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, enum ieee80211_roc_type type) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct cfg80211_chan_def chandef; - int ret; + struct iwl_mvm_phy_ctxt *phy_ctxt; + int ret, i; + + IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, + duration, type); if (vif->type != NL80211_IFTYPE_P2P_DEVICE) { IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type); return -EINVAL; } - IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, - duration, type); - mutex_lock(&mvm->mutex); + for (i = 0; i < NUM_PHY_CTX; i++) { + phy_ctxt = &mvm->phy_ctxts[i]; + if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) + continue; + + if (phy_ctxt->ref && channel == phy_ctxt->channel) { + /* + * Unbind the P2P_DEVICE from the current PHY context, + * and if the PHY context is not used remove it. 
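+ * Then rebind it to this already-running context that matches the requested channel.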
+ */ + ret = iwl_mvm_binding_remove_vif(mvm, vif); + if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + + /* Bind the P2P_DEVICE to the current PHY Context */ + mvmvif->phy_ctxt = phy_ctxt; + + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (WARN(ret, "Failed binding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + goto schedule_time_event; + } + } + + /* Need to update the PHY context only if the ROC channel changed */ + if (channel == mvmvif->phy_ctxt->channel) + goto schedule_time_event; + cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); + /* + * Change the PHY context configuration as it is currently referenced + * only by the P2P Device MAC + */ + if (mvmvif->phy_ctxt->ref == 1) { + ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, + &chandef, 1, 1); + if (ret) + goto out_unlock; + } else { + /* + * The PHY context is shared with other MACs. Need to remove the + * P2P Device from the binding, allocate a new PHY context and + * create a new binding + */ + phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!phy_ctxt) { + ret = -ENOSPC; + goto out_unlock; + } + + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, + 1, 1); + if (ret) { + IWL_ERR(mvm, "Failed to change PHY context\n"); + goto out_unlock; + } + + /* Unbind the P2P_DEVICE from the current PHY context */ + ret = iwl_mvm_binding_remove_vif(mvm, vif); + if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + + /* Bind the P2P_DEVICE to the newly allocated PHY context */ + mvmvif->phy_ctxt = phy_ctxt; + + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (WARN(ret, "Failed binding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + } + +schedule_time_event: /* Schedule the time events */ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); +out_unlock: mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); - return ret; } @@ -1211,15 +1312,30 @@ static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt; int ret; + IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); + mutex_lock(&mvm->mutex); + phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!phy_ctxt) { + ret = -ENOSPC; + goto out; + } - IWL_DEBUG_MAC80211(mvm, "Add PHY context\n"); - ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, &ctx->def, - ctx->rx_chains_static, - ctx->rx_chains_dynamic); + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def, + ctx->rx_chains_static, + ctx->rx_chains_dynamic); + if (ret) { + IWL_ERR(mvm, "Failed to add PHY context\n"); + goto out; + } + + iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); + *phy_ctxt_id = phy_ctxt->id; +out: mutex_unlock(&mvm->mutex); return ret; } @@ -1228,10 +1344,11 @@ static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; mutex_lock(&mvm->mutex); - iwl_mvm_phy_ctxt_remove(mvm, phy_ctxt); + iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); mutex_unlock(&mvm->mutex); } 
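The hunks above replace the single dedicated ROC PHY context with a small pool of reference-counted contexts shared between MACs. A minimal sketch of the resulting lifecycle, modelled on the P2P_DEVICE path in iwl_mvm_mac_add_interface() and on iwl_mvm_add_chanctx(); the function name below is hypothetical and error unwinding is abbreviated:

static int example_bind_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct cfg80211_chan_def *chandef)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_phy_ctxt *phy_ctxt;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* pick the first slot in mvm->phy_ctxts[] with ref == 0 */
	phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
	if (!phy_ctxt)
		return -ENOSPC;

	/*
	 * All contexts were already added to the firmware in iwl_mvm_up(),
	 * so runtime flows only modify them (FW_CTXT_ACTION_MODIFY).
	 */
	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, chandef, 1, 1);
	if (ret)
		return ret;

	iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);		/* ref: 0 -> 1 */
	mvmvif->phy_ctxt = phy_ctxt;

	ret = iwl_mvm_binding_add_vif(mvm, vif);
	if (ret) {
		/* unref only drops the count; the slot becomes reusable */
		iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
		mvmvif->phy_ctxt = NULL;
	}
	return ret;
}

Note that iwl_mvm_phy_ctxt_unref() no longer sends a FW remove command; a slot whose reference count drops to zero simply becomes eligible for reuse by the next caller of iwl_mvm_get_free_phy_ctxt().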
@@ -1240,7 +1357,16 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; + + if (WARN_ONCE((phy_ctxt->ref > 1) && + (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | + IEEE80211_CHANCTX_CHANGE_RX_CHAINS | + IEEE80211_CHANCTX_CHANGE_RADAR)), + "Cannot change PHY. Ref=%d, changed=0x%X\n", + phy_ctxt->ref, changed)) + return; mutex_lock(&mvm->mutex); iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def, @@ -1254,13 +1380,14 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_phy_ctxt *phyctx = (void *)ctx->drv_priv; + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; mutex_lock(&mvm->mutex); - mvmvif->phy_ctxt = phyctx; + mvmvif->phy_ctxt = phy_ctxt; switch (vif->type) { case NL80211_IFTYPE_AP: diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 9f46b23801bc..c7409f159a36 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -88,6 +88,7 @@ enum iwl_mvm_tx_fifo { IWL_MVM_TX_FIFO_BE, IWL_MVM_TX_FIFO_VI, IWL_MVM_TX_FIFO_VO, + IWL_MVM_TX_FIFO_MCAST = 5, }; extern struct ieee80211_ops iwl_mvm_hw_ops; @@ -109,6 +110,7 @@ extern struct iwl_mvm_mod_params iwlmvm_mod_params; struct iwl_mvm_phy_ctxt { u16 id; u16 color; + u32 ref; /* * TODO: This should probably be removed. Currently here only for rate @@ -149,6 +151,60 @@ enum iwl_power_scheme { #define IWL_CONN_MAX_LISTEN_INTERVAL 70 +#ifdef CONFIG_IWLWIFI_DEBUGFS +enum iwl_dbgfs_pm_mask { + MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0), + MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1), + MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), + MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), + MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), + MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5), +}; + +struct iwl_dbgfs_pm { + u8 keep_alive_seconds; + u32 rx_data_timeout; + u32 tx_data_timeout; + bool skip_over_dtim; + u8 skip_dtim_periods; + bool disable_power_off; + int mask; +}; + +/* beacon filtering */ + +enum iwl_dbgfs_bf_mask { + MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0), + MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1), + MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2), + MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3), + MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4), + MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5), + MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6), + MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7), + MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8), +}; + +struct iwl_dbgfs_bf { + u8 bf_energy_delta; + u8 bf_roaming_energy_delta; + u8 bf_roaming_state; + u8 bf_temperature_delta; + u8 bf_enable_beacon_filter; + u8 bf_debug_flag; + u32 bf_escape_timer; + u32 ba_escape_timer; + u8 ba_enable_beacon_abort; + int mask; +}; +#endif + +enum iwl_mvm_smps_type_request { + IWL_MVM_SMPS_REQ_BT_COEX, + IWL_MVM_SMPS_REQ_TT, + NUM_IWL_MVM_SMPS_REQ, +}; + /** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context * @id: between 0 and 3 @@ -163,6 +219,8 @@ enum iwl_power_scheme { * @bcast_sta: station used for broadcast packets. Used by the following * vifs: P2P_DEVICE, GO and AP. 
* @beacon_skb: the skb used to hold the AP/GO beacon template + * @smps_requests: the requests of the different parts of the driver, + * regarding the desired SMPS mode. */ struct iwl_mvm_vif { u16 id; @@ -172,6 +230,8 @@ struct iwl_mvm_vif { bool uploaded; bool ap_active; bool monitor_active; + /* indicate whether beacon filtering is enabled */ + bool bf_enabled; u32 ap_beacon_time; @@ -214,7 +274,11 @@ struct iwl_mvm_vif { struct dentry *dbgfs_dir; struct dentry *dbgfs_slink; void *dbgfs_data; + struct iwl_dbgfs_pm dbgfs_pm; + struct iwl_dbgfs_bf dbgfs_bf; #endif + + enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; }; static inline struct iwl_mvm_vif * @@ -223,12 +287,6 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) { return (void *)vif->drv_priv; } -enum iwl_mvm_status { - IWL_MVM_STATUS_HW_RFKILL, - IWL_MVM_STATUS_ROC_RUNNING, - IWL_MVM_STATUS_IN_HW_RESTART, -}; - enum iwl_scan_status { IWL_MVM_SCAN_NONE, IWL_MVM_SCAN_OS, @@ -246,6 +304,63 @@ struct iwl_nvm_section { const u8 *data; }; +/* + * Tx-backoff threshold + * @temperature: The threshold in Celsius + * @backoff: The tx-backoff in uSec + */ +struct iwl_tt_tx_backoff { + s32 temperature; + u32 backoff; +}; + +#define TT_TX_BACKOFF_SIZE 6 + +/** + * struct iwl_tt_params - thermal throttling parameters + * @ct_kill_entry: CT Kill entry threshold + * @ct_kill_exit: CT Kill exit threshold + * @ct_kill_duration: The interval (in uSec) at which the driver needs + * to check whether to exit CT Kill. + * @dynamic_smps_entry: Dynamic SMPS entry threshold + * @dynamic_smps_exit: Dynamic SMPS exit threshold + * @tx_protection_entry: TX protection entry threshold + * @tx_protection_exit: TX protection exit threshold + * @tx_backoff: Array of thresholds for tx-backoff, in ascending order. + * @support_ct_kill: Support CT Kill? + * @support_dynamic_smps: Support dynamic SMPS? + * @support_tx_protection: Support tx protection? + * @support_tx_backoff: Support tx-backoff? + */ +struct iwl_tt_params { + s32 ct_kill_entry; + s32 ct_kill_exit; + u32 ct_kill_duration; + s32 dynamic_smps_entry; + s32 dynamic_smps_exit; + s32 tx_protection_entry; + s32 tx_protection_exit; + struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE]; + bool support_ct_kill; + bool support_dynamic_smps; + bool support_tx_protection; + bool support_tx_backoff; +}; + +/** + * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure + * @ct_kill_exit: worker to exit thermal kill + * @dynamic_smps: Is dynamic SMPS currently enabled due to thermal throttling? + * @tx_backoff: The current thermal throttling tx backoff in uSec. + * @params: Parameters to configure the thermal throttling algorithm. 
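+ * (as the reported temperature rises, @tx_backoff would be expected to step up the ascending {temperature, backoff} table in @params)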
+ */ +struct iwl_mvm_tt_mgmt { + struct delayed_work ct_kill_exit; + bool dynamic_smps; + u32 tx_backoff; + const struct iwl_tt_params *params; +}; + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -266,6 +381,12 @@ struct iwl_mvm { unsigned long status; + /* + * for beacon filtering - + * currently only one interface can be supported + */ + struct iwl_mvm_vif *bf_allowed_vif; + enum iwl_ucode_type cur_ucode; bool ucode_loaded; bool init_ucode_run; @@ -313,7 +434,7 @@ struct iwl_mvm { bool prevent_power_down_d3; #endif - struct iwl_mvm_phy_ctxt phy_ctxt_roc; + struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; struct list_head time_event_list; spinlock_t time_event_lock; @@ -337,12 +458,23 @@ struct iwl_mvm { struct ieee80211_vif *p2p_device_vif; #ifdef CONFIG_PM_SLEEP + struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; +#ifdef CONFIG_IWLWIFI_DEBUGFS + bool d3_test_active; + bool store_d3_resume_sram; + void *d3_resume_sram; + u32 d3_test_pme_ptr; +#endif #endif /* BT-Coex */ u8 bt_kill_msk; struct iwl_bt_coex_profile_notif last_bt_notif; + + /* Thermal Throttling and CTkill */ + struct iwl_mvm_tt_mgmt thermal_throttle; + s32 temperature; /* Celsius */ }; /* Extract MVM priv from op_mode and _hw */ @@ -352,6 +484,19 @@ struct iwl_mvm { #define IWL_MAC80211_GET_MVM(_hw) \ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) +enum iwl_mvm_status { + IWL_MVM_STATUS_HW_RFKILL, + IWL_MVM_STATUS_HW_CTKILL, + IWL_MVM_STATUS_ROC_RUNNING, + IWL_MVM_STATUS_IN_HW_RESTART, +}; + +static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) +{ + return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) || + test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); +} + extern const u8 iwl_mvm_ac_to_tx_fifo[]; struct iwl_rate_info { @@ -443,8 +588,10 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); -void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, - struct iwl_mvm_phy_ctxt *ctxt); +void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, + struct iwl_mvm_phy_ctxt *ctxt); +void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, + struct iwl_mvm_phy_ctxt *ctxt); /* MAC (virtual interface) programming */ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -459,6 +606,9 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); +int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -523,6 +673,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, struct inet6_dev *idev); void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); +extern const struct file_operations iwl_dbgfs_d3_test_ops; /* BT Coex */ int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm); @@ -534,4 +685,31 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event rssi_event); void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +/* beacon filtering */ +#ifdef CONFIG_IWLWIFI_DEBUGFS +void +iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd); +#else +static inline void 
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd) +{} +#endif +int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); + +/* SMPS */ +void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_smps_type_request req_type, + enum ieee80211_smps_mode smps_request); + +/* Thermal management and CT-kill */ +void iwl_mvm_tt_handler(struct iwl_mvm *mvm); +void iwl_mvm_tt_initialize(struct iwl_mvm *mvm); +void iwl_mvm_tt_exit(struct iwl_mvm *mvm); +void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c index b8ec02f89acc..edb94ea31654 100644 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c @@ -60,6 +60,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ +#include <linux/firmware.h> #include "iwl-trans.h" #include "mvm.h" #include "iwl-eeprom-parse.h" @@ -75,31 +76,56 @@ static const int nvm_to_read[] = { }; /* Default NVM size to read */ -#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024); +#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024) +#define IWL_MAX_NVM_SECTION_SIZE 6000 -static inline void iwl_nvm_fill_read(struct iwl_nvm_access_cmd *cmd, - u16 offset, u16 length, u16 section) +#define NVM_WRITE_OPCODE 1 +#define NVM_READ_OPCODE 0 + +/* + * prepare the NVM host command w/ the pointers to the nvm buffer + * and send it to fw + */ +static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section, + u16 offset, u16 length, const u8 *data) { - cmd->offset = cpu_to_le16(offset); - cmd->length = cpu_to_le16(length); - cmd->type = cpu_to_le16(section); + struct iwl_nvm_access_cmd nvm_access_cmd = { + .offset = cpu_to_le16(offset), + .length = cpu_to_le16(length), + .type = cpu_to_le16(section), + .op_code = NVM_WRITE_OPCODE, + }; + struct iwl_host_cmd cmd = { + .id = NVM_ACCESS_CMD, + .len = { sizeof(struct iwl_nvm_access_cmd), length }, + .flags = CMD_SYNC | CMD_SEND_IN_RFKILL, + .data = { &nvm_access_cmd, data }, + /* data may come from vmalloc, so use _DUP */ + .dataflags = { 0, IWL_HCMD_DFL_DUP }, + }; + + return iwl_mvm_send_cmd(mvm, &cmd); } static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, u16 offset, u16 length, u8 *data) { - struct iwl_nvm_access_cmd nvm_access_cmd = {}; + struct iwl_nvm_access_cmd nvm_access_cmd = { + .offset = cpu_to_le16(offset), + .length = cpu_to_le16(length), + .type = cpu_to_le16(section), + .op_code = NVM_READ_OPCODE, + }; struct iwl_nvm_access_resp *nvm_resp; struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = NVM_ACCESS_CMD, - .flags = CMD_SYNC | CMD_WANT_SKB, + .flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &nvm_access_cmd, }, }; int ret, bytes_read, offset_read; u8 *resp_data; - iwl_nvm_fill_read(&nvm_access_cmd, offset, length, section); cmd.len[0] = sizeof(struct iwl_nvm_access_cmd); ret = iwl_mvm_send_cmd(mvm, &cmd); @@ -144,6 +170,30 @@ exit: return ret; } +static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section, + const u8 *data, u16 length) +{ + int offset = 0; + + /* copy data in chunks of 2k (and remainder if any) */ + + while (offset < length) { + int chunk_size, ret; + + chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, + length - offset); + + ret 
= iwl_nvm_write_chunk(mvm, section, offset, + chunk_size, data + offset); + if (ret < 0) + return ret; + + offset += chunk_size; + } + + return 0; +} + /* * Reads an NVM section completely. * NICs prior to the 7000 family don't have a real NVM, but just read @@ -177,7 +227,8 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, offset += ret; } - IWL_INFO(mvm, "NVM section %d read completed\n", section); + IWL_DEBUG_EEPROM(mvm->trans->dev, + "NVM section %d read completed\n", section); return offset; } @@ -200,7 +251,130 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; - return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib); + return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, + iwl_fw_valid_tx_ant(mvm->fw), + iwl_fw_valid_rx_ant(mvm->fw)); +} + +#define MAX_NVM_FILE_LEN 16384 + +/* + * HOW TO CREATE THE NVM FILE FORMAT: + * ------------------------------ + * 1. create hex file, format: + * 3800 -> header + * 0000 -> header + * 5a40 -> data + * + * rev - 6 bit (word1) + * len - 10 bit (word1) + * id - 4 bit (word2) + * rsv - 12 bit (word2) + * + * 2. flip 8 bits with 8 bits per line to get the right NVM file format + * + * 3. create binary file from the hex file + * + * 4. save as "iNVM_xxx.bin" under /lib/firmware + */ static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm) { int ret, section_id, section_size; const struct firmware *fw_entry; const struct { __le16 word1; __le16 word2; u8 data[]; } *file_sec; const u8 *eof; +#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) +#define NVM_WORD2_ID(x) (x >> 12) + /* * Obtain NVM image via request_firmware. Since we already used * request_firmware_nowait() for the firmware binary load and only * get here after that we assume the NVM request can be satisfied * synchronously. 
+ */ + ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file, + mvm->trans->dev); + if (ret) { + IWL_ERR(mvm, "ERROR: %s isn't available %d\n", + iwlwifi_mod_params.nvm_file, ret); + return ret; + } + + IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n", + iwlwifi_mod_params.nvm_file, fw_entry->size); + + if (fw_entry->size < sizeof(*file_sec)) { + IWL_ERR(mvm, "NVM file too small\n"); + ret = -EINVAL; + goto out; + } + + if (fw_entry->size > MAX_NVM_FILE_LEN) { + IWL_ERR(mvm, "NVM file too large\n"); + ret = -EINVAL; + goto out; + } + + eof = fw_entry->data + fw_entry->size; + + file_sec = (void *)fw_entry->data; + + while (true) { + if (file_sec->data > eof) { + IWL_ERR(mvm, + "ERROR - NVM file too short for section header\n"); + ret = -EINVAL; + break; + } + + /* check for EOF marker */ + if (!file_sec->word1 && !file_sec->word2) { + ret = 0; + break; + } + + section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); + section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); + + if (section_size > IWL_MAX_NVM_SECTION_SIZE) { + IWL_ERR(mvm, "ERROR - section too large (%d)\n", + section_size); + ret = -EINVAL; + break; + } + + if (!section_size) { + IWL_ERR(mvm, "ERROR - section empty\n"); + ret = -EINVAL; + break; + } + + if (file_sec->data + section_size > eof) { + IWL_ERR(mvm, + "ERROR - NVM file too short for section (%d bytes)\n", + section_size); + ret = -EINVAL; + break; + } + + ret = iwl_nvm_write_section(mvm, section_id, file_sec->data, + section_size); + if (ret < 0) { + IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret); + break; + } + + /* advance to the next section */ + file_sec = (void *)(file_sec->data + section_size); + } +out: + release_firmware(fw_entry); + return ret; } int iwl_nvm_init(struct iwl_mvm *mvm) @@ -208,6 +382,17 @@ int iwl_nvm_init(struct iwl_mvm *mvm) int ret, i, section; u8 *nvm_buffer, *temp; + /* load external NVM if configured */ + if (iwlwifi_mod_params.nvm_file) { + /* move to External NVM flow */ + ret = iwl_mvm_load_external_nvm(mvm); + if (ret) + return ret; + } + + /* Read From FW NVM */ + IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); + /* TODO: find correct NVM max size for a section */ nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, GFP_KERNEL); @@ -231,8 +416,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm) if (ret < 0) return ret; - ret = 0; mvm->nvm_data = iwl_parse_nvm_sections(mvm); + if (!mvm->nvm_data) + return -ENODATA; - return ret; + return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index b29c31a41594..af79a14063a9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -215,17 +215,22 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false), RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), + + RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true), + RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false), + RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true), + RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false), RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false), - RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true), - RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false), - RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, 
false), RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), + RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, + false), + RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false), }; #undef RX_HANDLER @@ -288,11 +293,14 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(NET_DETECT_HOTSPOTS_CMD), CMD(NET_DETECT_HOTSPOTS_QUERY_CMD), CMD(CARD_STATE_NOTIFICATION), + CMD(MISSED_BEACONS_NOTIFICATION), CMD(BT_COEX_PRIO_TABLE), CMD(BT_COEX_PROT_ENV), CMD(BT_PROFILE_NOTIFICATION), CMD(BT_CONFIG), CMD(MCAST_FILTER_CMD), + CMD(REPLY_BEACON_FILTERING_CMD), + CMD(REPLY_THERMAL_MNG_BACKOFF), }; #undef CMD @@ -393,10 +401,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (err) goto out_free; + iwl_mvm_tt_initialize(mvm); + mutex_lock(&mvm->mutex); err = iwl_run_init_mvm_ucode(mvm, true); mutex_unlock(&mvm->mutex); - if (err && !iwlmvm_mod_params.init_dbg) { + /* returns 0 if successful, 1 if success but in rfkill */ + if (err < 0 && !iwlmvm_mod_params.init_dbg) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); goto out_free; } @@ -439,10 +450,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_mvm_leds_exit(mvm); + iwl_mvm_tt_exit(mvm); + ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); +#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) + kfree(mvm->d3_resume_sram); +#endif + iwl_trans_stop_hw(mvm->trans, true); iwl_phy_db_free(mvm->phy_db); @@ -589,6 +606,16 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) ieee80211_wake_queue(mvm->hw, mq); } +void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) +{ + if (state) + set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); + else + clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); + + wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); +} + static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); @@ -598,7 +625,7 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) else clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); - wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state); + wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c index a28a1d1f23eb..a8652ddd6bed 100644 --- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c @@ -195,21 +195,6 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, return ret; } - -struct phy_ctx_used_data { - unsigned long used[BITS_TO_LONGS(NUM_PHY_CTX)]; -}; - -static void iwl_mvm_phy_ctx_used_iter(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx, - void *_data) -{ - struct phy_ctx_used_data *data = _data; - struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; - - __set_bit(phy_ctxt->id, data->used); -} - /* * Send a command to add a PHY context based on the current HW configuration. */ @@ -217,34 +202,28 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { - struct phy_ctx_used_data data = { - .used = { }, - }; - - /* - * If this is a regular PHY context (not the ROC one) - * skip the ROC PHY context's ID. 
- */ - if (ctxt != &mvm->phy_ctxt_roc) - __set_bit(mvm->phy_ctxt_roc.id, data.used); + int ret; + WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + ctxt->ref); lockdep_assert_held(&mvm->mutex); - ctxt->color++; - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - ieee80211_iter_chan_contexts_atomic( - mvm->hw, iwl_mvm_phy_ctx_used_iter, &data); + ctxt->channel = chandef->chan; + ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + chains_static, chains_dynamic, + FW_CTXT_ACTION_ADD, 0); - ctxt->id = find_first_zero_bit(data.used, NUM_PHY_CTX); - if (WARN_ONCE(ctxt->id == NUM_PHY_CTX, - "Failed to init PHY context - no free ID!\n")) - return -EIO; - } + return ret; +} - ctxt->channel = chandef->chan; - return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, - chains_static, chains_dynamic, - FW_CTXT_ACTION_ADD, 0); +/* + * Update the number of references to the given PHY context. This is valid only + * in case the PHY context was already created, i.e., its reference count > 0. + */ +void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) +{ + lockdep_assert_held(&mvm->mutex); + ctxt->ref++; } /* @@ -264,23 +243,12 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, FW_CTXT_ACTION_MODIFY, 0); } -/* - * Send a command to the FW to remove the given phy context. - * Once the command is sent, regardless of success or failure, the context is - * marked as invalid - */ -void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) +void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { - struct iwl_phy_context_cmd cmd; - int ret; - lockdep_assert_held(&mvm->mutex); - iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, FW_CTXT_ACTION_REMOVE, 0); - ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC, - sizeof(struct iwl_phy_context_cmd), - &cmd); - if (ret) - IWL_ERR(mvm, "Failed to send PHY remove: ctxt id=%d\n", - ctxt->id); + if (WARN_ON_ONCE(!ctxt)) + return; + + ctxt->ref--; } diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index ed77e437aac4..3760a33ca3a4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -75,6 +75,54 @@ #define POWER_KEEP_ALIVE_PERIOD_SEC 25 +static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, + struct iwl_beacon_filter_cmd *cmd) +{ + int ret; + + ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC, + sizeof(struct iwl_beacon_filter_cmd), cmd); + + if (!ret) { + IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", + cmd->ba_enable_beacon_abort); + IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", + cmd->ba_escape_timer); + IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", + cmd->bf_debug_flag); + IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n", + cmd->bf_enable_beacon_filter); + IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", + cmd->bf_energy_delta); + IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", + cmd->bf_escape_timer); + IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n", + cmd->bf_roaming_energy_delta); + IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", + cmd->bf_roaming_state); + IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n", + cmd->bf_temperature_delta); + } + return ret; +} + +static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, bool enable) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_beacon_filter_cmd cmd = { + 
IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = 1, + .ba_enable_beacon_abort = enable, + }; + + if (!mvmvif->bf_enabled) + return 0; + + iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); + return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); +} + static void iwl_mvm_power_log(struct iwl_mvm *mvm, struct iwl_powertable_cmd *cmd) { @@ -91,6 +139,9 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm, le32_to_cpu(cmd->tx_data_timeout)); IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", cmd->lprx_rssi_threshold); + if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) + IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", + le32_to_cpu(cmd->skip_dtim_periods)); } } @@ -103,6 +154,8 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int dtimper, dtimper_msec; int keep_alive; bool radar_detect = false; + struct iwl_mvm_vif *mvmvif __maybe_unused = + iwl_mvm_vif_from_mac80211(vif); /* * Regardless of power management state the driver must set @@ -115,7 +168,14 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); + if (!vif->bss_conf.assoc) + cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF && + mvmvif->dbgfs_pm.disable_power_off) + cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK); +#endif if (!vif->bss_conf.ps) return; @@ -135,8 +195,11 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* Check skip over DTIM conditions */ if (!radar_detect && (dtimper <= 10) && - (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP)) + (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP || + mvm->cur_ucode == IWL_UCODE_WOWLAN)) { cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + cmd->skip_dtim_periods = cpu_to_le32(3); + } /* Check that keep alive period is at least 3 * DTIM */ dtimper_msec = dtimper * vif->bss_conf.beacon_int; @@ -145,27 +208,76 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); cmd->keep_alive_seconds = keep_alive; - cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); - cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); + if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { + cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); + cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); + } else { + cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC); + cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC); + } + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) + cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds; + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) { + if (mvmvif->dbgfs_pm.skip_over_dtim) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + else + cmd->flags &= + cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK); + } + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT) + cmd->rx_data_timeout = + cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout); + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT) + cmd->tx_data_timeout = + cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout); + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) + cmd->skip_dtim_periods = + cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods); +#endif /* CONFIG_IWLWIFI_DEBUGFS */ } int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct 
ieee80211_vif *vif) { + int ret; + bool ba_enable; struct iwl_powertable_cmd cmd = {}; if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; + /* + * TODO: The following vif_count verification is temporary condition. + * Avoid power mode update if more than one interface is currently + * active. Remove this condition when FW will support power management + * on multiple MACs. + */ + IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n", + mvm->vif_count); + if (mvm->vif_count > 1) + return 0; + iwl_mvm_power_build_cmd(mvm, vif, &cmd); iwl_mvm_power_log(mvm, &cmd); - return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, - sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, + sizeof(cmd), &cmd); + if (ret) + return ret; + + ba_enable = !!(cmd.flags & + cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)); + + return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable); } int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_powertable_cmd cmd = {}; + struct iwl_mvm_vif *mvmvif __maybe_unused = + iwl_mvm_vif_from_mac80211(vif); if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; @@ -173,8 +285,82 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF && + mvmvif->dbgfs_pm.disable_power_off) + cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK); +#endif iwl_mvm_power_log(mvm, &cmd); return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC, sizeof(cmd), &cmd); } + +#ifdef CONFIG_IWLWIFI_DEBUGFS +void +iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; + + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA) + cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta; + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA) + cmd->bf_roaming_energy_delta = + dbgfs_bf->bf_roaming_energy_delta; + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE) + cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state; + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA) + cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta; + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG) + cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag; + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER) + cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer); + if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER) + cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer); + if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT) + cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort; +} +#endif + +int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = 1, + }; + int ret; + + if (mvmvif != mvm->bf_allowed_vif || + vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); + ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); + + if (!ret) + mvmvif->bf_enabled = true; + + return ret; +} + +int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct 
iwl_beacon_filter_cmd cmd = {}; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); + + if (!ret) + mvmvif->bf_enabled = false; + + return ret; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c index a1e3e923ea3e..29d49cf0fdb2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/iwlwifi/mvm/quota.c @@ -169,27 +169,34 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif) num_active_bindings++; } - if (!num_active_bindings) - goto send_cmd; - - quota = IWL_MVM_MAX_QUOTA / num_active_bindings; - quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings; + quota = 0; + quota_rem = 0; + if (num_active_bindings) { + quota = IWL_MVM_MAX_QUOTA / num_active_bindings; + quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings; + } for (idx = 0, i = 0; i < MAX_BINDINGS; i++) { - if (data.n_interfaces[i] <= 0) + if (data.colors[i] < 0) continue; cmd.quotas[idx].id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i])); - cmd.quotas[idx].quota = cpu_to_le32(quota); - cmd.quotas[idx].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); + + if (data.n_interfaces[i] <= 0) { + cmd.quotas[idx].quota = cpu_to_le32(0); + cmd.quotas[idx].max_duration = cpu_to_le32(0); + } else { + cmd.quotas[idx].quota = cpu_to_le32(quota); + cmd.quotas[idx].max_duration = + cpu_to_le32(IWL_MVM_MAX_QUOTA); + } idx++; } /* Give the remainder of the session to the first binding */ le32_add_cpu(&cmd.quotas[0].quota, quota_rem); -send_cmd: ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC, sizeof(cmd), &cmd); if (ret) diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index b99fe3163866..31587a318f8b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -401,6 +401,17 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, load = rs_tl_get_load(lq_data, tid); + /* + * Don't create TX aggregation sessions when in high + * BT traffic, as they would just be disrupted by BT. 
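+	 * + * For illustration, assuming the load scale defined by enum + * iwl_bt_coex_profile_traffic_load in rs.h (0 = NONE, 1 = LOW, + * 2 = HIGH, 3 = CONTINUOUS), the ">= 2" check below skips + * aggregation exactly for the HIGH and CONTINUOUS load levels.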
+ */ + if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) { + IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n", + BT_MBOX_MSG(&mvm->last_bt_notif, + 3, TRAFFIC_LOAD)); + return ret; + } + if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) { IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); @@ -1519,6 +1530,29 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm, u8 update_search_tbl_counter = 0; int ret; + switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_SISO_SWITCH_ANTENNA2) + tbl->action = IWL_SISO_SWITCH_MIMO2_AB; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + valid_tx_ant = + first_antenna(iwl_fw_valid_tx_ant(mvm->fw)); + if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + break; + default: + IWL_ERR(mvm, "Invalid BT load %d", + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)); + break; + } + start_action = tbl->action; while (1) { lq_sta->action_counter++; @@ -1532,7 +1566,9 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm, tx_chains_num <= 2)) break; - if (window->success_ratio >= IWL_RS_GOOD_RATIO) + if (window->success_ratio >= IWL_RS_GOOD_RATIO && + BT_MBOX_MSG(&mvm->last_bt_notif, 3, + TRAFFIC_LOAD) == 0) break; memcpy(search_tbl, tbl, sz); @@ -1654,6 +1690,28 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm, u8 update_search_tbl_counter = 0; int ret; + switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + if (tbl->action != IWL_MIMO2_SWITCH_SISO_A) + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_MIMO2_SWITCH_SISO_B || + tbl->action == IWL_MIMO2_SWITCH_SISO_C) + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + break; + default: + IWL_ERR(mvm, "Invalid BT load %d", + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)); + break; + } + start_action = tbl->action; while (1) { lq_sta->action_counter++; @@ -1791,6 +1849,28 @@ static int rs_move_mimo3_to_other(struct iwl_mvm *mvm, int ret; u8 update_search_tbl_counter = 0; + switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + if (tbl->action != IWL_MIMO3_SWITCH_SISO_A) + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_MIMO3_SWITCH_SISO_B || + tbl->action == IWL_MIMO3_SWITCH_SISO_C) + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + break; + default: + IWL_ERR(mvm, "Invalid BT load %d", + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)); + break; + } + start_action = tbl->action; while (1) { lq_sta->action_counter++; @@ -2302,6 +2382,32 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, (current_tpt > (100 * tbl->expected_tpt[low])))) scale_action = 0; + if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= + IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && + (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { + if 
(lq_sta->last_bt_traffic > + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + /* + * don't set scale_action, don't want to scale up if + * the rate scale doesn't otherwise think that is a + * good idea. + */ + } else if (lq_sta->last_bt_traffic <= + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + scale_action = -1; + } + } + lq_sta->last_bt_traffic = + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD); + + if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= + IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && + (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { + /* search for a new modulation */ + rs_stay_in_table(lq_sta, true); + goto lq_update; + } + switch (scale_action) { case -1: /* Decrease starting rate, update uCode's rate table */ @@ -2783,6 +2889,13 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm, lq_cmd->agg_time_limit = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + /* + * overwrite if needed, pass aggregation time limit + * to uCode in uSec - this is racy, but at least it helps... + */ + if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) + lq_cmd->agg_time_limit = cpu_to_le16(1200); } static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) @@ -3081,3 +3194,29 @@ void iwl_mvm_rate_control_unregister(void) { ieee80211_rate_control_unregister(&rs_mvm_ops); } + +/** + * iwl_mvm_tx_protection - Gets the LQ command, changes it to enable/disable + * Tx protection according to this request and previous requests, + * and sends the LQ command. + * @lq: The LQ command + * @mvmsta: The station + * @enable: Enable Tx protection? + */ +int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, + struct iwl_mvm_sta *mvmsta, bool enable) +{ + lockdep_assert_held(&mvm->mutex); + + if (enable) { + if (mvmsta->tx_protection == 0) + lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK; + mvmsta->tx_protection++; + } else { + mvmsta->tx_protection--; + if (mvmsta->tx_protection == 0) + lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK; + } + + return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index 219c6857cc0f..cff4f6da7733 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -358,6 +358,18 @@ struct iwl_lq_sta { u8 last_bt_traffic; }; +enum iwl_bt_coex_profile_traffic_load { + IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0, + IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1, + IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2, + IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3, +/* + * There are no further values: even though the field below is a u8, + * the indication from the BT device only has two bits. + */ +}; + + static inline u8 num_of_ant(u8 mask) { return !!((mask) & ANT_A) + @@ -390,4 +402,9 @@ extern int iwl_mvm_rate_control_register(void); */ extern void iwl_mvm_rate_control_unregister(void); +struct iwl_mvm_sta; + +int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, + struct iwl_mvm_sta *mvmsta, bool enable); + #endif /* __rs__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 4dfc21a3e83e..e4930d5027d2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -363,3 +363,25 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, rxb, &rx_status); return 0; } + +/* + * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler + * + * TODO: This handler is only partially implemented. + * It only gets the NIC's temperature.
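+ * + * Sketch of the flow implemented below: the firmware's general + * statistics carry a temperature field; when it differs from the + * cached mvm->temperature, the cache is updated and + * iwl_mvm_tt_handler() is invoked to re-evaluate thermal throttling.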
+ */ +int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_notif_statistics *stats = (void *)&pkt->data; + struct mvm_statistics_general_common *common = &stats->general.common; + + if (mvm->temperature != le32_to_cpu(common->temperature)) { + mvm->temperature = le32_to_cpu(common->temperature); + iwl_mvm_tt_handler(mvm); + } + + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 2476e43799d5..2157b0f8ced5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -298,12 +298,6 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm, else cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); - /* - * TODO: This is a WA due to a bug in the FW AUX framework that does not - * properly handle time events that fail to be scheduled - */ - cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); - cmd->repeats = cpu_to_le32(1); /* diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 5c664ed54400..62fe5209093b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -64,6 +64,7 @@ #include "mvm.h" #include "sta.h" +#include "rs.h" static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm) { @@ -217,6 +218,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvmvif->color); mvm_sta->vif = vif; mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + mvm_sta->tx_protection = 0; + mvm_sta->tt_tx_protection = false; /* HW restart, don't assume the memory has been zeroed */ atomic_set(&mvm->pending_frames[sta_id], 0); @@ -226,9 +229,6 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); - if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) - mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue); - /* for HW restart - need to reset the seq_number etc... */ memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data)); @@ -798,21 +798,23 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, min(mvmsta->max_agg_bufsize, buf_size); mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; + IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", + sta->addr, tid); + if (mvm->cfg->ht_params->use_rts_for_aggregation) { /* * switch to RTS/CTS if it is the preferred protection * method for HT traffic + * this function also sends the LQ command */ - mvmsta->lq_sta.lq.flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK; + return iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, + mvmsta, true); /* * TODO: remove the TLC_RTS flag when we tear down the last * AGG session (agg_tids_count in DVM) */ } - IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", - sta->addr, tid); - return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false); } @@ -1287,17 +1289,11 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, - .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, - .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE), + .station_flags_msk = cpu_to_le32(STA_FLG_PS), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; - /* - * Same modify mask for sleep_tx_count and sleep_state_flags but this - * should be fine since if we set the STA as "awake", then - * sleep_tx_count is not relevant.
- */ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index a4ddce77aaae..94b265eb32b8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -250,7 +250,6 @@ enum iwl_mvm_agg_state { * the first packet to be sent in legacy HW queue in Tx AGG stop flow. * Basically when next_reclaimed reaches ssn, we can tell mac80211 that * we are ready to finish the Tx AGG stop / start flow. - * @wait_for_ba: Expect block-ack before next Tx reply */ struct iwl_mvm_tid_data { u16 seq_number; @@ -260,7 +259,6 @@ struct iwl_mvm_tid_data { enum iwl_mvm_agg_state state; u16 txq_id; u16 ssn; - bool wait_for_ba; }; /** @@ -275,6 +273,8 @@ struct iwl_mvm_tid_data { * @lock: lock to protect the whole struct. Since %tid_data is access from Tx * and from Tx response flow, it needs a spinlock. * @tid_data: per tid data. Look at %iwl_mvm_tid_data. + * @tx_protection: reference counter for controlling the Tx protection. + * @tt_tx_protection: is thermal throttling enable Tx protection? * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -296,6 +296,10 @@ struct iwl_mvm_sta { #ifdef CONFIG_PM_SLEEP u16 last_seq_ctl; #endif + + /* Temporary, until the new TLC will control the Tx protection */ + s8 tx_protection; + bool tt_tx_protection; }; /** diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c new file mode 100644 index 000000000000..a7e3b8ddf22b --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/tt.c @@ -0,0 +1,512 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include "mvm.h" +#include "iwl-config.h" +#include "iwl-io.h" +#include "iwl-csr.h" +#include "iwl-prph.h" + +#define OTP_DTS_DIODE_DEVIATION 96 /*in words*/ +/* VBG - Voltage Band Gap error data (temperature offset) */ +#define OTP_WP_DTS_VBG (OTP_DTS_DIODE_DEVIATION + 2) +#define MEAS_VBG_MIN_VAL 2300 +#define MEAS_VBG_MAX_VAL 3000 +#define MEAS_VBG_DEFAULT_VAL 2700 +#define DTS_DIODE_VALID(flags) (flags & DTS_DIODE_REG_FLAGS_PASS_ONCE) +#define MIN_TEMPERATURE 0 +#define MAX_TEMPERATURE 125 +#define TEMPERATURE_ERROR (MAX_TEMPERATURE + 1) +#define PTAT_DIGITAL_VALUE_MIN_VALUE 0 +#define PTAT_DIGITAL_VALUE_MAX_VALUE 0xFF +#define DTS_VREFS_NUM 5 +static inline u32 DTS_DIODE_GET_VREFS_ID(u32 flags) +{ + return (flags & DTS_DIODE_REG_FLAGS_VREFS_ID) >> + DTS_DIODE_REG_FLAGS_VREFS_ID_POS; +} + +#define CALC_VREFS_MIN_DIFF 43 +#define CALC_VREFS_MAX_DIFF 51 +#define CALC_LUT_SIZE (1 + CALC_VREFS_MAX_DIFF - CALC_VREFS_MIN_DIFF) +#define CALC_LUT_INDEX_OFFSET CALC_VREFS_MIN_DIFF +#define CALC_TEMPERATURE_RESULT_SHIFT_OFFSET 23 + +/* + * @digital_value: The diode's digital-value sampled (temperature/voltage) + * @vref_low: The lower voltage-reference (the vref just below the diode's + * sampled digital-value) + * @vref_high: The higher voltage-reference (the vref just above the diode's + * sampled digital-value) + * @flags: bits[1:0]: The ID of the Vrefs pair (lowVref,highVref) + * bits[6:2]: Reserved. + * bits[7:7]: Indicates completion of at least 1 successful sample + * since last DTS reset. 
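+ * + * Worked example (hypothetical register value, little-endian host + * assumed): a raw reg_value of 0x83625f40 decodes through the union + * below as digital_value = 0x40, vref_low = 0x5f, vref_high = 0x62 and + * flags = 0x83, i.e. Vrefs pair 3 with the pass-once bit (bit 7) set.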
+ */ +struct iwl_mvm_dts_diode_bits { + u8 digital_value; + u8 vref_low; + u8 vref_high; + u8 flags; +} __packed; + +union dts_diode_results { + u32 reg_value; + struct iwl_mvm_dts_diode_bits bits; +} __packed; + +static s16 iwl_mvm_dts_get_volt_band_gap(struct iwl_mvm *mvm) +{ + struct iwl_nvm_section calib_sec; + const __le16 *calib; + u16 vbg; + + /* TODO: move parsing to NVM code */ + calib_sec = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION]; + calib = (__le16 *)calib_sec.data; + + vbg = le16_to_cpu(calib[OTP_WP_DTS_VBG]); + + if (vbg < MEAS_VBG_MIN_VAL || vbg > MEAS_VBG_MAX_VAL) + vbg = MEAS_VBG_DEFAULT_VAL; + + return vbg; +} + +static u16 iwl_mvm_dts_get_ptat_deviation_offset(struct iwl_mvm *mvm) +{ + const u8 *calib; + u8 ptat, pa1, pa2, median; + + /* TODO: move parsing to NVM code */ + calib = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION].data; + ptat = calib[OTP_DTS_DIODE_DEVIATION]; + pa1 = calib[OTP_DTS_DIODE_DEVIATION + 1]; + pa2 = calib[OTP_DTS_DIODE_DEVIATION + 2]; + + /* get the median: */ + if (ptat > pa1) { + if (ptat > pa2) + median = (pa1 > pa2) ? pa1 : pa2; + else + median = ptat; + } else { + if (pa1 > pa2) + median = (ptat > pa2) ? ptat : pa2; + else + median = pa1; + } + + return ptat - median; +} + +static u8 iwl_mvm_dts_calibrate_ptat_deviation(struct iwl_mvm *mvm, u8 value) +{ + /* Calibrate the PTAT digital value, based on PTAT deviation data: */ + s16 new_val = value - iwl_mvm_dts_get_ptat_deviation_offset(mvm); + + if (new_val > PTAT_DIGITAL_VALUE_MAX_VALUE) + new_val = PTAT_DIGITAL_VALUE_MAX_VALUE; + else if (new_val < PTAT_DIGITAL_VALUE_MIN_VALUE) + new_val = PTAT_DIGITAL_VALUE_MIN_VALUE; + + return new_val; +} + +static bool dts_get_adjacent_vrefs(struct iwl_mvm *mvm, + union dts_diode_results *avg_ptat) +{ + u8 vrefs_results[DTS_VREFS_NUM]; + u8 low_vref_index = 0, flags; + u32 reg; + + reg = iwl_read_prph(mvm->trans, DTSC_VREF_AVG); + memcpy(vrefs_results, &reg, sizeof(reg)); + reg = iwl_read_prph(mvm->trans, DTSC_VREF5_AVG); + vrefs_results[4] = reg & 0xff; + + if (avg_ptat->bits.digital_value < vrefs_results[0] || + avg_ptat->bits.digital_value > vrefs_results[4]) + return false; + + if (avg_ptat->bits.digital_value > vrefs_results[3]) + low_vref_index = 3; + else if (avg_ptat->bits.digital_value > vrefs_results[2]) + low_vref_index = 2; + else if (avg_ptat->bits.digital_value > vrefs_results[1]) + low_vref_index = 1; + + avg_ptat->bits.vref_low = vrefs_results[low_vref_index]; + avg_ptat->bits.vref_high = vrefs_results[low_vref_index + 1]; + flags = avg_ptat->bits.flags; + avg_ptat->bits.flags = + (flags & ~DTS_DIODE_REG_FLAGS_VREFS_ID) | + (low_vref_index & DTS_DIODE_REG_FLAGS_VREFS_ID); + return true; +} + +/* + * return true if the results are valid, and false otherwise.
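+ * + * "Valid" is assumed to mean both conditions checked in the return + * statement below hold: an adjacent pair of voltage references was + * found for the averaged PTAT value, and the pass-once flag reports at + * least one successful sample since the last DTS reset.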
+ */ +static bool dts_read_ptat_avg_results(struct iwl_mvm *mvm, + union dts_diode_results *avg_ptat) +{ + u32 reg; + u8 tmp; + + /* fill the diode value and pass_once with avg-reg results */ + reg = iwl_read_prph(mvm->trans, DTSC_PTAT_AVG); + reg &= DTS_DIODE_REG_DIG_VAL | DTS_DIODE_REG_PASS_ONCE; + avg_ptat->reg_value = reg; + + /* calibrate the PTAT digital value */ + tmp = avg_ptat->bits.digital_value; + tmp = iwl_mvm_dts_calibrate_ptat_deviation(mvm, tmp); + avg_ptat->bits.digital_value = tmp; + + /* + * fill vrefs fields, based on the avgVrefs results + * and the diode value + */ + return dts_get_adjacent_vrefs(mvm, avg_ptat) && + DTS_DIODE_VALID(avg_ptat->bits.flags); +} + +static s32 calculate_nic_temperature(union dts_diode_results avg_ptat, + u16 volt_band_gap) +{ + u32 tmp_result; + u8 vrefs_diff; + /* + * For temperature calculation (at the end, shift right by 23) + * LUT[(D2-D1)] = ROUND{ 2^23 / ((D2-D1)*9*10) } + * (D2-D1) == 43 44 45 46 47 48 49 50 51 + */ + static const u16 calc_lut[CALC_LUT_SIZE] = { + 2168, 2118, 2071, 2026, 1983, 1942, 1902, 1864, 1828, + }; + + /* + * The diff between the high and low voltage-references is assumed + * to lie strictly in the range [43, 51] (CALC_VREFS_MIN_DIFF to + * CALC_VREFS_MAX_DIFF, per the check below) + */ + vrefs_diff = avg_ptat.bits.vref_high - avg_ptat.bits.vref_low; + + if (vrefs_diff < CALC_VREFS_MIN_DIFF || + vrefs_diff > CALC_VREFS_MAX_DIFF) + return TEMPERATURE_ERROR; + + /* calculate the result: */ + tmp_result = + vrefs_diff * (DTS_DIODE_GET_VREFS_ID(avg_ptat.bits.flags) + 9); + tmp_result += avg_ptat.bits.digital_value; + tmp_result -= avg_ptat.bits.vref_high; + + /* multiply by the LUT value (based on the diff) */ + tmp_result *= calc_lut[vrefs_diff - CALC_LUT_INDEX_OFFSET]; + + /* + * Get the BandGap (the voltage references source) error data + * (temperature offset) + */ + tmp_result *= volt_band_gap; + + /* + * here, the tmp_result value can use up to 32 bits. We want to + * right-shift it *without* sign-extension.
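+ * + * Since tmp_result is declared u32, the ">>" below is a logical shift; + * shifting by CALC_TEMPERATURE_RESULT_SHIFT_OFFSET (23) undoes the 2^23 + * scaling baked into calc_lut above, e.g. 0x04000000 (2^26) >> 23 == 8.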
+ */ + tmp_result = tmp_result >> CALC_TEMPERATURE_RESULT_SHIFT_OFFSET; + + /* + * at this point, tmp_result should be in the range: + * 200 <= tmp_result <= 365 + */ + return (s16)tmp_result - 240; +} + +static s32 check_nic_temperature(struct iwl_mvm *mvm) +{ + u16 volt_band_gap; + union dts_diode_results avg_ptat; + + volt_band_gap = iwl_mvm_dts_get_volt_band_gap(mvm); + + /* disable DTS */ + iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0); + + /* SV initialization */ + iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 1); + iwl_write_prph(mvm->trans, DTSC_CFG_MODE, + DTSC_CFG_MODE_PERIODIC); + + /* wait for results */ + msleep(100); + if (!dts_read_ptat_avg_results(mvm, &avg_ptat)) + return TEMPERATURE_ERROR; + + /* disable DTS */ + iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0); + + return calculate_nic_temperature(avg_ptat, volt_band_gap); +} + +static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) +{ + u32 duration = mvm->thermal_throttle.params->ct_kill_duration; + + IWL_ERR(mvm, "Enter CT Kill\n"); + iwl_mvm_set_hw_ctkill_state(mvm, true); + schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, + round_jiffies_relative(duration * HZ)); +} + +static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm) +{ + IWL_ERR(mvm, "Exit CT Kill\n"); + iwl_mvm_set_hw_ctkill_state(mvm, false); +} + +static void check_exit_ctkill(struct work_struct *work) +{ + struct iwl_mvm_tt_mgmt *tt; + struct iwl_mvm *mvm; + u32 duration; + s32 temp; + + tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); + mvm = container_of(tt, struct iwl_mvm, thermal_throttle); + + duration = tt->params->ct_kill_duration; + + iwl_trans_start_hw(mvm->trans); + temp = check_nic_temperature(mvm); + iwl_trans_stop_hw(mvm->trans, false); + + if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) { + IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n"); + goto reschedule; + } + IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); + + if (temp <= tt->params->ct_kill_exit) { + iwl_mvm_exit_ctkill(mvm); + return; + } + +reschedule: + schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, + round_jiffies(duration * HZ)); +} + +static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = _data; + enum ieee80211_smps_mode smps_mode; + + lockdep_assert_held(&mvm->mutex); + + if (mvm->thermal_throttle.dynamic_smps) + smps_mode = IEEE80211_SMPS_DYNAMIC; + else + smps_mode = IEEE80211_SMPS_AUTOMATIC; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode); +} + +static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + int i, err; + + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) + continue; + mvmsta = (void *)sta->drv_priv; + if (enable == mvmsta->tt_tx_protection) + continue; + err = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, + mvmsta, enable); + if (err) { + IWL_ERR(mvm, "Failed to %s Tx protection\n", + enable ? "enable" : "disable"); + } else { + IWL_DEBUG_TEMP(mvm, "%s Tx protection\n", + enable ? 
"Enable" : "Disable"); + mvmsta->tt_tx_protection = enable; + } + } +} + +static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_THERMAL_MNG_BACKOFF, + .len = { sizeof(u32), }, + .data = { &backoff, }, + .flags = CMD_SYNC, + }; + + if (iwl_mvm_send_cmd(mvm, &cmd) == 0) { + IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n", + backoff); + mvm->thermal_throttle.tx_backoff = backoff; + } else { + IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n"); + } +} + +void iwl_mvm_tt_handler(struct iwl_mvm *mvm) +{ + const struct iwl_tt_params *params = mvm->thermal_throttle.params; + struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; + s32 temperature = mvm->temperature; + int i; + u32 tx_backoff; + + IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature); + + if (params->support_ct_kill && temperature >= params->ct_kill_entry) { + iwl_mvm_enter_ctkill(mvm); + return; + } + + if (params->support_dynamic_smps) { + if (!tt->dynamic_smps && + temperature >= params->dynamic_smps_entry) { + IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n"); + tt->dynamic_smps = true; + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_tt_smps_iterator, mvm); + } else if (tt->dynamic_smps && + temperature <= params->dynamic_smps_exit) { + IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n"); + tt->dynamic_smps = false; + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_tt_smps_iterator, mvm); + } + } + + if (params->support_tx_protection) { + if (temperature >= params->tx_protection_entry) + iwl_mvm_tt_tx_protection(mvm, true); + else if (temperature <= params->tx_protection_exit) + iwl_mvm_tt_tx_protection(mvm, false); + } + + if (params->support_tx_backoff) { + tx_backoff = 0; + for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) { + if (temperature < params->tx_backoff[i].temperature) + break; + tx_backoff = params->tx_backoff[i].backoff; + } + if (tt->tx_backoff != tx_backoff) + iwl_mvm_tt_tx_backoff(mvm, tx_backoff); + } +} + +static const struct iwl_tt_params iwl7000_tt_params = { + .ct_kill_entry = 118, + .ct_kill_exit = 96, + .ct_kill_duration = 5, + .dynamic_smps_entry = 114, + .dynamic_smps_exit = 110, + .tx_protection_entry = 114, + .tx_protection_exit = 108, + .tx_backoff = { + {.temperature = 112, .backoff = 200}, + {.temperature = 113, .backoff = 600}, + {.temperature = 114, .backoff = 1200}, + {.temperature = 115, .backoff = 2000}, + {.temperature = 116, .backoff = 4000}, + {.temperature = 117, .backoff = 10000}, + }, + .support_ct_kill = true, + .support_dynamic_smps = true, + .support_tx_protection = true, + .support_tx_backoff = true, +}; + +void iwl_mvm_tt_initialize(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; + + IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n"); + tt->params = &iwl7000_tt_params; + INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); +} + +void iwl_mvm_tt_exit(struct iwl_mvm *mvm) +{ + cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit); + IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n"); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 48c1891e3df6..f0e96a927407 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -175,7 +175,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, * table is controlled by LINK_QUALITY commands */ - if (ieee80211_is_data(fc)) { + if (ieee80211_is_data(fc) && 
sta) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; @@ -408,7 +408,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, tid, txq_id, seq_number); - /* NOTE: aggregation will need changes here (for txq id) */ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; @@ -610,8 +609,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, !(info->flags & IEEE80211_TX_STAT_ACK)) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; - /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ - if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { + /* W/A FW bug: seq_ctl is wrong when the status isn't success */ + if (status != TX_STATUS_SUCCESS) { struct ieee80211_hdr *hdr = (void *)skb->data; seq_ctl = le16_to_cpu(hdr->seq_ctrl); } diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index 687b34e387ac..1e1332839e4a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -76,6 +76,11 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) { int ret; +#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) + if (WARN_ON(mvm->d3_test_active)) + return -EIO; +#endif + /* * Synchronous commands from this op-mode must hold * the mutex, this ensures we don't try to send two @@ -125,6 +130,11 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, lockdep_assert_held(&mvm->mutex); +#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) + if (WARN_ON(mvm->d3_test_active)) + return -EIO; +#endif + /* * Only synchronous commands can wait for status, * we use WANT_SKB so the caller can't. @@ -471,3 +481,34 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, return iwl_mvm_send_cmd(mvm, &cmd); } + +/** + * iwl_mvm_update_smps - Get a request to change the SMPS mode + * @req_type: The part of the driver that requested the change. + * @smps_request: The requested SMPS mode. + * + * Get a request to change the SMPS mode, + * and reconcile it with all other requests in the driver.
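+ * + * The resolution implemented below: a STATIC request from any requester + * wins outright, otherwise any DYNAMIC request yields DYNAMIC, and only + * when every stored request is AUTOMATIC is AUTOMATIC passed on to + * ieee80211_request_smps().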
+ */ +void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_smps_type_request req_type, + enum ieee80211_smps_mode smps_request) +{ + struct iwl_mvm_vif *mvmvif; + enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; + int i; + + lockdep_assert_held(&mvm->mutex); + mvmvif = iwl_mvm_vif_from_mac80211(vif); + mvmvif->smps_requests[req_type] = smps_request; + for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { + if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) { + smps_mode = IEEE80211_SMPS_STATIC; + break; + } + if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) + smps_mode = IEEE80211_SMPS_DYNAMIC; + } + + ieee80211_request_smps(vif, smps_mode); +} diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 8cb53ec2b77b..81f3ea5b09a4 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -78,6 +78,7 @@ /* Hardware specific file defines the PCI IDs table for that hardware module */ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { +#if IS_ENABLED(CONFIG_IWLDVM) {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ @@ -253,13 +254,60 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, +#endif /* CONFIG_IWLDVM */ +#if IS_ENABLED(CONFIG_IWLMVM) /* 7000 Series */ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, + 
{IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, + +/* 3160 Series */ + {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, +#endif /* CONFIG_IWLMVM */ {0} }; diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 148843e7f34f..b654dcdd048a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -217,6 +217,7 @@ struct iwl_pcie_txq_scratch_buf { * @trans_pcie: pointer back to transport (for timer) * @need_update: indicates need to update read/write index * @active: stores if queue is active + * @ampdu: true if this queue is an ampdu queue for an specific RA/TID * * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame * descriptors) and required locking structures. @@ -232,6 +233,7 @@ struct iwl_txq { struct iwl_trans_pcie *trans_pcie; u8 need_update; u8 active; + bool ampdu; }; static inline dma_addr_t diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 567e67ad1f61..3688dc5ba1ac 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -802,9 +802,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) u32 handled = 0; unsigned long flags; u32 i; -#ifdef CONFIG_IWLWIFI_DEBUG - u32 inta_mask; -#endif lock_map_acquire(&trans->sync_cmd_lockdep_map); @@ -826,14 +823,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) inta = trans_pcie->inta; -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_have_debug_level(IWL_DL_ISR)) { - /* just for debug */ - inta_mask = iwl_read32(trans, CSR_INT_MASK); + if (iwl_have_debug_level(IWL_DL_ISR)) IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", - inta, inta_mask); - } -#endif + inta, iwl_read32(trans, CSR_INT_MASK)); /* saved interrupt in inta variable now we can reset trans_pcie->inta */ trans_pcie->inta = 0; @@ -855,12 +847,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) goto out; } -#ifdef CONFIG_IWLWIFI_DEBUG if (iwl_have_debug_level(IWL_DL_ISR)) { /* NIC fires this, but we don't use it, redundant with WAKEUP */ if (inta & CSR_INT_BIT_SCD) { - IWL_DEBUG_ISR(trans, "Scheduler finished to transmit " - "the frame/frames.\n"); + IWL_DEBUG_ISR(trans, + "Scheduler finished to transmit the frame/frames.\n"); isr_stats->sch++; } @@ -870,7 +861,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) isr_stats->alive++; } } -#endif + /* Safely ignore these bits for debug checks below */ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); @@ -1118,9 +1109,6 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data) struct iwl_trans *trans = data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 inta, inta_mask; -#ifdef CONFIG_IWLWIFI_DEBUG - u32 inta_fh; -#endif lockdep_assert_held(&trans_pcie->irq_lock); @@ -1159,13 +1147,11 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data) return IRQ_HANDLED; } -#ifdef CONFIG_IWLWIFI_DEBUG - if 
(iwl_have_debug_level(IWL_DL_ISR)) { - inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS); - IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, " - "fh 0x%08x\n", inta, inta_mask, inta_fh); - } -#endif + if (iwl_have_debug_level(IWL_DL_ISR)) + IWL_DEBUG_ISR(trans, + "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, + iwl_read32(trans, CSR_FH_INT_STATUS)); trans_pcie->inta |= inta; /* the thread will service interrupts and re-enable them */ @@ -1198,7 +1184,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) { struct iwl_trans *trans = data; struct iwl_trans_pcie *trans_pcie; - u32 inta, inta_mask; + u32 inta; u32 val = 0; u32 read; unsigned long flags; @@ -1226,7 +1212,6 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) * If we have something to service, the tasklet will re-enable ints. * If we *don't* have something, we'll re-enable before leaving here. */ - inta_mask = iwl_read32(trans, CSR_INT_MASK); iwl_write32(trans, CSR_INT_MASK, 0x00000000); /* Ignore interrupt if there's nothing in NIC to service. @@ -1271,8 +1256,11 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) val |= 0x8000; inta = (0xff & val) | ((0xff00 & val) << 16); - IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", - inta, inta_mask, val); + IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n", + inta, trans_pcie->inta_mask, val); + if (iwl_have_debug_level(IWL_DL_ISR)) + IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n", + iwl_read32(trans, CSR_INT_MASK)); inta &= trans_pcie->inta_mask; trans_pcie->inta |= inta; diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 50ba0a468f94..197dbe0a868c 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -405,20 +405,27 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, { u8 *v_addr; dma_addr_t p_addr; - u32 offset; + u32 offset, chunk_sz = section->len; int ret = 0; IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", section_num); - v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL); - if (!v_addr) - return -ENOMEM; + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, + GFP_KERNEL | __GFP_NOWARN); + if (!v_addr) { + IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); + chunk_sz = PAGE_SIZE; + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, + &p_addr, GFP_KERNEL); + if (!v_addr) + return -ENOMEM; + } - for (offset = 0; offset < section->len; offset += PAGE_SIZE) { + for (offset = 0; offset < section->len; offset += chunk_sz) { u32 copy_size; - copy_size = min_t(u32, PAGE_SIZE, section->len - offset); + copy_size = min_t(u32, chunk_sz, section->len - offset); memcpy(v_addr, (u8 *)section->data + offset, copy_size); ret = iwl_pcie_load_firmware_chunk(trans, @@ -432,7 +439,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, } } - dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr); + dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); return ret; } @@ -571,13 +578,17 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) clear_bit(STATUS_RFKILL, &trans_pcie->status); } -static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans) +static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) { - /* let the ucode operate on its own */ - iwl_write32(trans, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - iwl_disable_interrupts(trans); + + /* + * in testing mode, the host 
stays awake and the + * hardware won't be reset (not even partially) + */ + if (test) + return; + iwl_pcie_disable_ict(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, @@ -596,11 +607,18 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans) } static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, - enum iwl_d3_status *status) + enum iwl_d3_status *status, + bool test) { u32 val; int ret; + if (test) { + iwl_enable_interrupts(trans); + *status = IWL_D3_STATUS_ALIVE; + return 0; + } + iwl_pcie_set_pwr(trans, false); val = iwl_read32(trans, CSR_RESET); @@ -636,9 +654,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return ret; } - iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - *status = IWL_D3_STATUS_ALIVE; return 0; } @@ -917,11 +932,11 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, } static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) + const void *buf, int dwords) { unsigned long flags; int offs, ret = 0; - u32 *vals = buf; + const u32 *vals = buf; if (iwl_trans_grab_nic_access(trans, false, &flags)) { iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index c5e30294c5ac..b8f3cb78382b 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -224,13 +224,13 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, switch (sec_ctl & TX_CMD_SEC_MSK) { case TX_CMD_SEC_CCM: - len += CCMP_MIC_LEN; + len += IEEE80211_CCMP_MIC_LEN; break; case TX_CMD_SEC_TKIP: - len += TKIP_ICV_LEN; + len += IEEE80211_TKIP_ICV_LEN; break; case TX_CMD_SEC_WEP: - len += WEP_IV_LEN + WEP_ICV_LEN; + len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; break; } @@ -1045,6 +1045,10 @@ static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans, (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); } +/* Receiver address (actually, Rx station's index into station table), + * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ +#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, int sta_id, int tid, int frame_limit, u16 ssn) { @@ -1069,6 +1073,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, /* enable aggregations for the queue */ iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); + trans_pcie->txq[txq_id].ampdu = true; } else { /* * disable aggregations for the queue, this will also make the @@ -1125,6 +1130,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) ARRAY_SIZE(zero_val)); iwl_pcie_txq_unmap(trans, txq_id); + trans_pcie->txq[txq_id].ampdu = false; IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } @@ -1518,11 +1524,13 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) { IWL_ERR(trans, "FW error in SYNC CMD %s\n", get_cmd_string(trans_pcie, cmd->id)); + dump_stack(); ret = -EIO; goto cancel; } - if (test_bit(STATUS_RFKILL, &trans_pcie->status)) { + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans_pcie->status)) { IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... 
no rsp\n"); ret = -ERFKILL; goto cancel; } @@ -1564,7 +1572,8 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) return -EIO; - if (test_bit(STATUS_RFKILL, &trans_pcie->status)) { + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans_pcie->status)) { IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", cmd->id); return -ERFKILL; @@ -1592,7 +1601,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, u8 wait_write_ptr = 0; __le16 fc = hdr->frame_control; u8 hdr_len = ieee80211_hdrlen(fc); - u16 __maybe_unused wifi_seq; + u16 wifi_seq; txq = &trans_pcie->txq[txq_id]; q = &txq->q; @@ -1609,13 +1618,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, * the BA. * Check here that the packets are in the right place on the ring. */ -#ifdef CONFIG_IWLWIFI_DEBUG wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && - ((wifi_seq & 0xff) != q->write_ptr), + WARN_ONCE(trans_pcie->txq[txq_id].ampdu && + (wifi_seq & 0xff) != q->write_ptr, "Q: %d WiFi Seq %d tfdNum %d", txq_id, wifi_seq, q->write_ptr); -#endif /* Set up driver data for this TFD */ txq->entries[q->write_ptr].skb = skb; diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index 4f614aad9ded..f7ff4725506a 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -3,13 +3,13 @@ config MWIFIEX depends on CFG80211 ---help--- This adds support for wireless adapters based on Marvell - 802.11n chipsets. + 802.11n/ac chipsets. If you choose to build it as a module, it will be called mwifiex. config MWIFIEX_SDIO - tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797" + tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8897" depends on MWIFIEX && MMC select FW_LOADER ---help--- diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index e42b266a023a..ef5fa890a286 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -20,6 +20,9 @@ #include "cfg80211.h" #include "main.h" +static char *reg_alpha2; +module_param(reg_alpha2, charp, 0); + static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { { .max = 2, .types = BIT(NL80211_IFTYPE_STATION), @@ -1231,6 +1234,51 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy, return 0; }
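The WARN_ONCE rewrite in the iwl_trans_pcie_tx hunk above encodes an invariant worth spelling out: on an AMPDU queue the scheduler derives the TFD index from the 802.11 sequence number, so the SN taken modulo the ring size must equal the queue's write pointer. The stand-alone model below is a sketch only, not driver code; the helper names are invented here, and the only facts carried over are the sequence-control layout (fragment number in bits 0-3, SN in bits 4-15, which is what IEEE80211_SEQ_TO_SN extracts) and the 256-entry ring implied by the 0xff mask.

#include <assert.h>
#include <stdint.h>

/* IEEE80211_SEQ_TO_SN equivalent: the SN lives in bits 4-15 of seq_ctrl */
#define SEQ_TO_SN(seq_ctrl)	(((seq_ctrl) & 0xfff0) >> 4)

/* Illustrative check: with 256 TFDs per queue, the low 8 bits of the SN
 * must select exactly the slot the write pointer is about to fill. */
static int ampdu_slot_matches(uint16_t seq_ctrl, uint8_t write_ptr)
{
	return (SEQ_TO_SN(seq_ctrl) & 0xff) == write_ptr;
}

int main(void)
{
	assert(ampdu_slot_matches(0x1050, 0x05));	/* SN 0x105 -> slot 5 */
	assert(!ampdu_slot_matches(0x1050, 0x06));	/* a mismatch would warn */
	return 0;
}

Tracking the per-queue ampdu flag instead of reading SCD_AGGR_SEL through PRPH also moves the check out from under CONFIG_IWLWIFI_DEBUG without adding a register access to the hot TX path.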
+/* cfg80211 operation handler for del_station. + * This function deauthenticates the station whose MAC address is given in + * the mac parameter. If mac is NULL or the broadcast address, every station + * in the associated-station list is deauthenticated. If the BSS is not + * started, or the station list is empty, no action is taken. + */ +static int +mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + struct mwifiex_sta_node *sta_node; + unsigned long flags; + + if (list_empty(&priv->sta_list) || !priv->bss_started) + return 0; + + if (!mac || is_broadcast_ether_addr(mac)) { + wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__); + list_for_each_entry(sta_node, &priv->sta_list, list) { + if (mwifiex_send_cmd_sync(priv, + HostCmd_CMD_UAP_STA_DEAUTH, + HostCmd_ACT_GEN_SET, 0, + sta_node->mac_addr)) + return -1; + mwifiex_uap_del_sta_data(priv, sta_node); + } + } else { + wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, mac); + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + sta_node = mwifiex_get_sta_entry(priv, mac); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + if (sta_node) { + if (mwifiex_send_cmd_sync(priv, + HostCmd_CMD_UAP_STA_DEAUTH, + HostCmd_ACT_GEN_SET, 0, + sta_node->mac_addr)) + return -1; + mwifiex_uap_del_sta_data(priv, sta_node); + } + } + + return 0; +} + static int mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) { @@ -1859,6 +1907,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, int i, offset, ret; struct ieee80211_channel *chan; struct ieee_types_header *ie; + struct mwifiex_user_scan_cfg *user_scan_cfg; wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); @@ -1869,20 +1918,22 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, return -EBUSY; } - if (priv->user_scan_cfg) { + /* Block the scan request if a scan operation, or the scan cleanup that + * runs when an interface is disabled, is still in progress + */ + if (priv->scan_request || priv->scan_aborting) { dev_err(priv->adapter->dev, "cmd: Scan already in process..\n"); return -EBUSY; } - priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), - GFP_KERNEL); - if (!priv->user_scan_cfg) + user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL); + if (!user_scan_cfg) return -ENOMEM; priv->scan_request = request; - priv->user_scan_cfg->num_ssids = request->n_ssids; - priv->user_scan_cfg->ssid_list = request->ssids; + user_scan_cfg->num_ssids = request->n_ssids; + user_scan_cfg->ssid_list = request->ssids; if (request->ie && request->ie_len) { offset = 0; @@ -1902,25 +1953,25 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, for (i = 0; i < min_t(u32, request->n_channels, MWIFIEX_USER_SCAN_CHAN_MAX); i++) { chan = request->channels[i]; - priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; - priv->user_scan_cfg->chan_list[i].radio_type = chan->band; + user_scan_cfg->chan_list[i].chan_number = chan->hw_value; + user_scan_cfg->chan_list[i].radio_type = chan->band; if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) - priv->user_scan_cfg->chan_list[i].scan_type = + user_scan_cfg->chan_list[i].scan_type = MWIFIEX_SCAN_TYPE_PASSIVE; else - priv->user_scan_cfg->chan_list[i].scan_type = + user_scan_cfg->chan_list[i].scan_type = MWIFIEX_SCAN_TYPE_ACTIVE; - priv->user_scan_cfg->chan_list[i].scan_time = 0; + user_scan_cfg->chan_list[i].scan_time = 0; } - ret = mwifiex_scan_networks(priv, priv->user_scan_cfg); + ret = mwifiex_scan_networks(priv, user_scan_cfg); + kfree(user_scan_cfg); if (ret) { dev_err(priv->adapter->dev, "scan failed: %d\n", ret); + priv->scan_aborting = false; priv->scan_request = NULL; - kfree(priv->user_scan_cfg); - priv->user_scan_cfg = NULL; return ret; } @@ -2419,6 +2470,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .change_beacon = mwifiex_cfg80211_change_beacon,
.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, .set_antenna = mwifiex_cfg80211_set_antenna, + .del_station = mwifiex_cfg80211_del_station, #ifdef CONFIG_PM .suspend = mwifiex_cfg80211_suspend, .resume = mwifiex_cfg80211_resume, @@ -2426,6 +2478,27 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { #endif }; +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support mwifiex_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT, + .n_patterns = MWIFIEX_MAX_FILTERS, + .pattern_min_len = 1, + .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, + .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, +}; +#endif + +static bool mwifiex_is_valid_alpha2(const char *alpha2) +{ + if (!alpha2 || strlen(alpha2) != 2) + return false; + + if (isalpha(alpha2[0]) && isalpha(alpha2[1])) + return true; + + return false; +} + /* * This function registers the device with CFG802.11 subsystem. * @@ -2478,16 +2551,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_CUSTOM_REGULATORY | + WIPHY_FLAG_STRICT_REGULATORY | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom); #ifdef CONFIG_PM - wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT; - wiphy->wowlan.n_patterns = MWIFIEX_MAX_FILTERS; - wiphy->wowlan.pattern_min_len = 1; - wiphy->wowlan.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN; - wiphy->wowlan.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN; + wiphy->wowlan = &mwifiex_wowlan_support; #endif wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | @@ -2519,10 +2589,16 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy_free(wiphy); return ret; } - country_code = mwifiex_11d_code_2_region(priv->adapter->region_code); - if (country_code) - dev_info(adapter->dev, - "ignoring F/W country code %2.2s\n", country_code); + + if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) { + wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2); + regulatory_hint(wiphy, reg_alpha2); + } else { + country_code = mwifiex_11d_code_2_region(adapter->region_code); + if (country_code) + wiphy_info(wiphy, "ignoring F/W country code %2.2s\n", + country_code); + } adapter->wiphy = wiphy; return ret; diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 26755d9acb55..2d761477d15e 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -570,6 +570,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_UAP_SYS_CONFIG: case HostCmd_CMD_UAP_BSS_START: case HostCmd_CMD_UAP_BSS_STOP: + case HostCmd_CMD_UAP_STA_DEAUTH: ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action, cmd_oid, data_buf, cmd_ptr); diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 1f7578d553ec..d6ada7354c14 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -271,6 +271,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075 #define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f #define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083 +#define HostCmd_CMD_CFG_DATA 0x008f #define HostCmd_CMD_VERSION_EXT 0x0097 #define HostCmd_CMD_MEF_CFG 0x009a #define HostCmd_CMD_RSSI_INFO 0x00a4 @@ -279,6 +280,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_UAP_SYS_CONFIG 0x00b0 #define HostCmd_CMD_UAP_BSS_START 0x00b1 #define HostCmd_CMD_UAP_BSS_STOP 0x00b2 +#define HostCmd_CMD_UAP_STA_DEAUTH 
0x00b5 #define HostCmd_CMD_11N_CFG 0x00cd #define HostCmd_CMD_11N_ADDBA_REQ 0x00ce #define HostCmd_CMD_11N_ADDBA_RSP 0x00cf @@ -464,6 +466,8 @@ enum P2P_MODES { #define MWIFIEX_CRITERIA_UNICAST BIT(1) #define MWIFIEX_CRITERIA_MULTICAST BIT(3) +#define CFG_DATA_TYPE_CAL 2 + struct mwifiex_ie_types_header { __le16 type; __le16 len; @@ -1197,6 +1201,11 @@ struct host_cmd_ds_amsdu_aggr_ctrl { __le16 curr_buf_size; } __packed; +struct host_cmd_ds_sta_deauth { + u8 mac[ETH_ALEN]; + __le16 reason; +} __packed; + struct mwifiex_ie_types_wmm_param_set { struct mwifiex_ie_types_header header; u8 wmm_ie[1]; @@ -1573,6 +1582,12 @@ struct mwifiex_ie_list { struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX]; } __packed; +struct host_cmd_ds_802_11_cfg_data { + __le16 action; + __le16 type; + __le16 data_len; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -1630,7 +1645,9 @@ struct host_cmd_ds_command { struct host_cmd_ds_802_11_eeprom_access eeprom; struct host_cmd_ds_802_11_subsc_evt subsc_evt; struct host_cmd_ds_sys_config uap_sys_config; + struct host_cmd_ds_sta_deauth sta_deauth; struct host_cmd_11ac_vht_cfg vht_cfg; + struct host_cmd_ds_802_11_cfg_data cfg_data; } params; } __packed; diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 9f44fda19db9..2fe31dcaa6ef 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -52,87 +52,6 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv) return 0; } -static void scan_delay_timer_fn(unsigned long data) -{ - struct mwifiex_private *priv = (struct mwifiex_private *)data; - struct mwifiex_adapter *adapter = priv->adapter; - struct cmd_ctrl_node *cmd_node, *tmp_node; - unsigned long flags; - - if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) { - /* - * Abort scan operation by cancelling all pending scan - * commands - */ - spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); - list_for_each_entry_safe(cmd_node, tmp_node, - &adapter->scan_pending_q, list) { - list_del(&cmd_node->list); - mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - } - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); - - spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); - adapter->scan_processing = false; - adapter->scan_delay_cnt = 0; - adapter->empty_tx_q_cnt = 0; - spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); - - if (priv->user_scan_cfg) { - if (priv->scan_request) { - dev_dbg(priv->adapter->dev, - "info: aborting scan\n"); - cfg80211_scan_done(priv->scan_request, 1); - priv->scan_request = NULL; - } else { - dev_dbg(priv->adapter->dev, - "info: scan already aborted\n"); - } - - kfree(priv->user_scan_cfg); - priv->user_scan_cfg = NULL; - } - goto done; - } - - if (!atomic_read(&priv->adapter->is_tx_received)) { - adapter->empty_tx_q_cnt++; - if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) { - /* - * No Tx traffic for 200msec. 
Get scan command from - * scan pending queue and put to cmd pending queue to - * resume scan operation - */ - adapter->scan_delay_cnt = 0; - adapter->empty_tx_q_cnt = 0; - spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); - cmd_node = list_first_entry(&adapter->scan_pending_q, - struct cmd_ctrl_node, list); - list_del(&cmd_node->list); - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, - flags); - - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, - true); - queue_work(adapter->workqueue, &adapter->main_work); - goto done; - } - } else { - adapter->empty_tx_q_cnt = 0; - } - - /* Delay scan operation further by 20msec */ - mod_timer(&priv->scan_delay_timer, jiffies + - msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC)); - adapter->scan_delay_cnt++; - -done: - if (atomic_read(&priv->adapter->is_tx_received)) - atomic_set(&priv->adapter->is_tx_received, false); - - return; -} - /* * This function initializes the private structure and sets default * values to the members. @@ -214,9 +133,6 @@ int mwifiex_init_priv(struct mwifiex_private *priv) priv->scan_block = false; - setup_timer(&priv->scan_delay_timer, scan_delay_timer_fn, - (unsigned long)priv); - return mwifiex_add_bss_prio_tbl(priv); } @@ -447,23 +363,29 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter) } /* - * This function frees the adapter structure. + * This function performs cleanup for adapter structure. * - * The freeing operation is done recursively, by canceling all - * pending commands, freeing the member buffers previously - * allocated (command buffers, scan table buffer, sleep confirm - * command buffer), stopping the timers and calling the cleanup - * routines for every interface, before the actual adapter - * structure is freed. + * The cleanup is done recursively, by canceling all pending + * commands, freeing the member buffers previously allocated + * (command buffers, scan table buffer, sleep confirm command + * buffer), stopping the timers and calling the cleanup routines + * for every interface. 
*/ static void -mwifiex_free_adapter(struct mwifiex_adapter *adapter) +mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) { + int i; + if (!adapter) { pr_err("%s: adapter is NULL\n", __func__); return; } + for (i = 0; i < adapter->priv_num; i++) { + if (adapter->priv[i]) + del_timer_sync(&adapter->priv[i]->scan_delay_timer); + } + mwifiex_cancel_all_pending_cmd(adapter); /* Free lock variables */ @@ -684,7 +606,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) int ret = -EINPROGRESS; struct mwifiex_private *priv; s32 i; - unsigned long flags; struct sk_buff *skb; /* mwifiex already shutdown */ @@ -719,7 +640,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) } } - spin_lock_irqsave(&adapter->mwifiex_lock, flags); + spin_lock(&adapter->mwifiex_lock); if (adapter->if_ops.data_complete) { while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) { @@ -733,10 +654,9 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) } } - /* Free adapter structure */ - mwifiex_free_adapter(adapter); + mwifiex_adapter_cleanup(adapter); - spin_unlock_irqrestore(&adapter->mwifiex_lock, flags); + spin_unlock(&adapter->mwifiex_lock); /* Notify completion */ ret = mwifiex_shutdown_fw_complete(adapter); diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 6bcb66e6e97c..122175af18c6 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -919,9 +919,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, memcpy(&priv->curr_bss_params.data_rates, &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates); - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n", - adhoc_start->data_rate[0], adhoc_start->data_rate[1], - adhoc_start->data_rate[2], adhoc_start->data_rate[3]); + dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n", + adhoc_start->data_rate); dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n"); diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 2eb88ea9acf7..e15ab72fb03d 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -25,6 +25,86 @@ #define VERSION "1.0" const char driver_version[] = "mwifiex " VERSION " (%s) "; +static char *cal_data_cfg; +module_param(cal_data_cfg, charp, 0); + +static void scan_delay_timer_fn(unsigned long data) +{ + struct mwifiex_private *priv = (struct mwifiex_private *)data; + struct mwifiex_adapter *adapter = priv->adapter; + struct cmd_ctrl_node *cmd_node, *tmp_node; + unsigned long flags; + + if (adapter->surprise_removed) + return; + + if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) { + /* + * Abort scan operation by cancelling all pending scan + * commands + */ + spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); + list_for_each_entry_safe(cmd_node, tmp_node, + &adapter->scan_pending_q, list) { + list_del(&cmd_node->list); + mwifiex_insert_cmd_to_free_q(adapter, cmd_node); + } + spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); + + spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); + adapter->scan_processing = false; + adapter->scan_delay_cnt = 0; + adapter->empty_tx_q_cnt = 0; + spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); + + if (priv->scan_request) { + dev_dbg(adapter->dev, "info: aborting scan\n"); + cfg80211_scan_done(priv->scan_request, 1); + priv->scan_request = NULL; + } else { + priv->scan_aborting = false; + dev_dbg(adapter->dev, "info: scan already aborted\n"); + } + goto done; + 
} + + if (!atomic_read(&priv->adapter->is_tx_received)) { + adapter->empty_tx_q_cnt++; + if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) { + /* + * No Tx traffic for 200msec. Get scan command from + * scan pending queue and put to cmd pending queue to + * resume scan operation + */ + adapter->scan_delay_cnt = 0; + adapter->empty_tx_q_cnt = 0; + spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); + cmd_node = list_first_entry(&adapter->scan_pending_q, + struct cmd_ctrl_node, list); + list_del(&cmd_node->list); + spin_unlock_irqrestore(&adapter->scan_pending_q_lock, + flags); + + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, + true); + queue_work(adapter->workqueue, &adapter->main_work); + goto done; + } + } else { + adapter->empty_tx_q_cnt = 0; + } + + /* Delay scan operation further by 20msec */ + mod_timer(&priv->scan_delay_timer, jiffies + + msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC)); + adapter->scan_delay_cnt++; + +done: + if (atomic_read(&priv->adapter->is_tx_received)) + atomic_set(&priv->adapter->is_tx_received, false); + + return; +} /* * This function registers the device and performs all the necessary @@ -73,6 +153,10 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, adapter->priv[i]->adapter = adapter; adapter->priv_num++; + + setup_timer(&adapter->priv[i]->scan_delay_timer, + scan_delay_timer_fn, + (unsigned long)adapter->priv[i]); } mwifiex_init_lock_list(adapter); @@ -336,6 +420,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) dev_notice(adapter->dev, "WLAN FW is active\n"); + if (cal_data_cfg) { + if ((request_firmware(&adapter->cal_data, cal_data_cfg, + adapter->dev)) < 0) + dev_err(adapter->dev, + "Cal data request_firmware() failed\n"); + } + adapter->init_wait_q_woken = false; ret = mwifiex_init_fw(adapter); if (ret == -1) { @@ -390,6 +481,10 @@ err_init_fw: pr_debug("info: %s: unregister device\n", __func__); adapter->if_ops.unregister_dev(adapter); done: + if (adapter->cal_data) { + release_firmware(adapter->cal_data); + adapter->cal_data = NULL; + } release_firmware(adapter->firmware); complete(&adapter->fw_load); return; @@ -436,6 +531,7 @@ mwifiex_close(struct net_device *dev) dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; + priv->scan_aborting = true; } return 0; @@ -573,9 +669,8 @@ static void mwifiex_set_multicast_list(struct net_device *dev) mcast_list.mode = MWIFIEX_ALL_MULTI_MODE; } else { mcast_list.mode = MWIFIEX_MULTICAST_MODE; - if (netdev_mc_count(dev)) - mcast_list.num_multicast_addr = - mwifiex_copy_mcast_addr(&mcast_list, dev); + mcast_list.num_multicast_addr = + mwifiex_copy_mcast_addr(&mcast_list, dev); } mwifiex_request_set_multicast_list(priv, &mcast_list); } diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 4ef67fca06d3..0832c2437daf 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -492,7 +492,6 @@ struct mwifiex_private { struct semaphore async_sem; u8 report_scan_result; struct cfg80211_scan_request *scan_request; - struct mwifiex_user_scan_cfg *user_scan_cfg; u8 cfg_bssid[6]; struct wps wps; u8 scan_block; @@ -510,6 +509,7 @@ struct mwifiex_private { u8 ap_11ac_enabled; u32 mgmt_frame_mask; struct mwifiex_roc_cfg roc_cfg; + bool scan_aborting; }; enum mwifiex_ba_status { @@ -730,6 +730,7 @@ struct mwifiex_adapter { u16 max_mgmt_ie_index; u8 scan_delay_cnt; u8 empty_tx_q_cnt; + const struct firmware 
*cal_data; /* 11AC */ u32 is_hw_11ac_capable; @@ -1115,6 +1116,8 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv, struct cfg80211_beacon_data *data); int mwifiex_del_mgmt_ies(struct mwifiex_private *priv); u8 *mwifiex_11d_code_2_region(u8 code); +void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, + struct mwifiex_sta_node *node); extern const struct ethtool_ops mwifiex_ethtool_ops; diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 9cf5d8f07df8..801b6b728379 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1784,22 +1784,17 @@ check_next_scan: if (priv->report_scan_result) priv->report_scan_result = false; - if (priv->user_scan_cfg) { - if (priv->scan_request) { - dev_dbg(priv->adapter->dev, - "info: notifying scan done\n"); - cfg80211_scan_done(priv->scan_request, 0); - priv->scan_request = NULL; - } else { - dev_dbg(priv->adapter->dev, - "info: scan already aborted\n"); - } - - kfree(priv->user_scan_cfg); - priv->user_scan_cfg = NULL; + if (priv->scan_request) { + dev_dbg(adapter->dev, "info: notifying scan done\n"); + cfg80211_scan_done(priv->scan_request, 0); + priv->scan_request = NULL; + } else { + priv->scan_aborting = false; + dev_dbg(adapter->dev, "info: scan already aborted\n"); } } else { - if (priv->user_scan_cfg && !priv->scan_request) { + if ((priv->scan_aborting && !priv->scan_request) || + priv->scan_block) { spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT; diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 363ba31b58bf..5ee5ed02eccd 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -77,6 +77,17 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; + if (id->driver_data) { + struct mwifiex_sdio_device *data = (void *)id->driver_data; + + card->firmware = data->firmware; + card->reg = data->reg; + card->max_ports = data->max_ports; + card->mp_agg_pkt_limit = data->mp_agg_pkt_limit; + card->supports_sdio_new_mode = data->supports_sdio_new_mode; + card->has_control_mask = data->has_control_mask; + } + sdio_claim_host(func); ret = sdio_enable_func(func); sdio_release_host(func); @@ -251,12 +262,19 @@ static int mwifiex_sdio_resume(struct device *dev) #define SDIO_DEVICE_ID_MARVELL_8787 (0x9119) /* Device ID for SD8797 */ #define SDIO_DEVICE_ID_MARVELL_8797 (0x9129) +/* Device ID for SD8897 */ +#define SDIO_DEVICE_ID_MARVELL_8897 (0x912d) /* WLAN IDs */ static const struct sdio_device_id mwifiex_ids[] = { - {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786)}, - {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)}, - {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786), + .driver_data = (unsigned long) &mwifiex_sdio_sd8786}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787), + .driver_data = (unsigned long) &mwifiex_sdio_sd8787}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797), + .driver_data = (unsigned long) &mwifiex_sdio_sd8797}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897), + .driver_data = (unsigned long) &mwifiex_sdio_sd8897}, {}, }; @@ -282,13 +300,13 @@ static struct sdio_driver mwifiex_sdio = { * This function writes data into SDIO card register. 
*/ static int -mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data) +mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data) { struct sdio_mmc_card *card = adapter->card; int ret = -1; sdio_claim_host(card->func); - sdio_writeb(card->func, (u8) data, reg, &ret); + sdio_writeb(card->func, data, reg, &ret); sdio_release_host(card->func); return ret; @@ -298,7 +316,7 @@ mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data) * This function reads data from SDIO card register. */ static int -mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u32 *data) +mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data) { struct sdio_mmc_card *card = adapter->card; int ret = -1; @@ -400,7 +418,40 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) } /* - * This function initializes the IO ports. + * This function is used to initialize IO ports for the + * chipsets supporting SDIO new mode, e.g. the SD8897. + */ +static int mwifiex_init_sdio_new_mode(struct mwifiex_adapter *adapter) +{ + u8 reg; + + adapter->ioport = MEM_PORT; + + /* enable sdio new mode */ + if (mwifiex_read_reg(adapter, CARD_CONFIG_2_1_REG, &reg)) + return -1; + if (mwifiex_write_reg(adapter, CARD_CONFIG_2_1_REG, + reg | CMD53_NEW_MODE)) + return -1; + + /* Configure cmd port and enable reading rx length from the register */ + if (mwifiex_read_reg(adapter, CMD_CONFIG_0, &reg)) + return -1; + if (mwifiex_write_reg(adapter, CMD_CONFIG_0, reg | CMD_PORT_RD_LEN_EN)) + return -1; + + /* Enable Dnld/Upld ready auto reset for cmd port after cmd53 is + * completed + */ + if (mwifiex_read_reg(adapter, CMD_CONFIG_1, &reg)) + return -1; + if (mwifiex_write_reg(adapter, CMD_CONFIG_1, reg | CMD_PORT_AUTO_EN)) + return -1; + + return 0; +} +
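The three enables in mwifiex_init_sdio_new_mode() above repeat one read-modify-write shape. Purely as a sketch — no such helper exists in this patch — the pattern could be factored using the mwifiex_read_reg()/mwifiex_write_reg() accessors defined earlier in this file:

/* Hypothetical helper (illustration only, not part of the patch): OR bits
 * into a FUNC1 register via the driver's byte-wide accessors, returning -1
 * on any I/O error to match the surrounding error convention.
 */
static int mwifiex_sdio_set_bits(struct mwifiex_adapter *adapter,
				 u32 reg_addr, u8 bits)
{
	u8 val;

	if (mwifiex_read_reg(adapter, reg_addr, &val))
		return -1;
	return mwifiex_write_reg(adapter, reg_addr, val | bits);
}

With that, the new-mode init would collapse to three calls: CMD53_NEW_MODE into CARD_CONFIG_2_1_REG, CMD_PORT_RD_LEN_EN into CMD_CONFIG_0, and CMD_PORT_AUTO_EN into CMD_CONFIG_1.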
+/* This function initializes the IO ports. * * The following operations are performed - * - Read the IO ports (0, 1 and 2) @@ -409,10 +460,17 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) */ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter) { - u32 reg; + u8 reg; + struct sdio_mmc_card *card = adapter->card; adapter->ioport = 0; + if (card->supports_sdio_new_mode) { + if (mwifiex_init_sdio_new_mode(adapter)) + return -1; + goto cont; + } + /* Read the IO port */ if (!mwifiex_read_reg(adapter, IO_PORT_0_REG, &reg)) adapter->ioport |= (reg & 0xff); @@ -428,19 +486,19 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter) adapter->ioport |= ((reg & 0xff) << 16); else return -1; - +cont: pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport); /* Set Host interrupt reset to read to clear */ if (!mwifiex_read_reg(adapter, HOST_INT_RSR_REG, &reg)) mwifiex_write_reg(adapter, HOST_INT_RSR_REG, - reg | SDIO_INT_MASK); + reg | card->reg->sdio_int_mask); else return -1; /* Dnld/Upld ready set to auto reset */ - if (!mwifiex_read_reg(adapter, CARD_MISC_CFG_REG, &reg)) - mwifiex_write_reg(adapter, CARD_MISC_CFG_REG, + if (!mwifiex_read_reg(adapter, card->reg->card_misc_cfg_reg, &reg)) + mwifiex_write_reg(adapter, card->reg->card_misc_cfg_reg, reg | AUTO_RE_ENABLE_INT); else return -1; @@ -486,34 +544,42 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter, static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) { struct sdio_mmc_card *card = adapter->card; - u16 rd_bitmap = card->mp_rd_bitmap; + const struct mwifiex_sdio_card_reg *reg = card->reg; + u32 rd_bitmap = card->mp_rd_bitmap; - dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%04x\n", rd_bitmap); + dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap); - if (!(rd_bitmap & (CTRL_PORT_MASK | DATA_PORT_MASK))) - return -1; + if (card->supports_sdio_new_mode) { + if (!(rd_bitmap & reg->data_port_mask)) + return -1; + } else { + if (!(rd_bitmap & (CTRL_PORT_MASK | reg->data_port_mask))) + return -1; + } - if (card->mp_rd_bitmap & CTRL_PORT_MASK) { - card->mp_rd_bitmap &= (u16) (~CTRL_PORT_MASK); + if ((card->has_control_mask) && + (card->mp_rd_bitmap & CTRL_PORT_MASK)) { + card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK); *port = CTRL_PORT; - dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%04x\n", + dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n", *port, card->mp_rd_bitmap); - } else { - if (card->mp_rd_bitmap & (1 << card->curr_rd_port)) { - card->mp_rd_bitmap &= (u16) - (~(1 << card->curr_rd_port)); - *port = card->curr_rd_port; + return 0; + } - if (++card->curr_rd_port == MAX_PORT) - card->curr_rd_port = 1; - } else { - return -1; - } + if (!(card->mp_rd_bitmap & (1 << card->curr_rd_port))) + return -1; + + /* We are now handling the SDIO data ports */ + card->mp_rd_bitmap &= (u32)(~(1 << card->curr_rd_port)); + *port = card->curr_rd_port; + + if (++card->curr_rd_port == card->max_ports) + card->curr_rd_port = reg->start_rd_port; + + dev_dbg(adapter->dev, + "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n", + *port, rd_bitmap, card->mp_rd_bitmap); - dev_dbg(adapter->dev, - "data: port=%d mp_rd_bitmap=0x%04x -> 0x%04x\n", - *port, rd_bitmap, card->mp_rd_bitmap); - } return 0; } @@ -524,35 +590,45 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) * increased (provided it does not reach the maximum limit, in which * case it is reset to 1) */ -static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port) +static int 
mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port) { struct sdio_mmc_card *card = adapter->card; - u16 wr_bitmap = card->mp_wr_bitmap; + const struct mwifiex_sdio_card_reg *reg = card->reg; + u32 wr_bitmap = card->mp_wr_bitmap; - dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%04x\n", wr_bitmap); + dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap); - if (!(wr_bitmap & card->mp_data_port_mask)) + if (card->supports_sdio_new_mode && + !(wr_bitmap & reg->data_port_mask)) { + adapter->data_sent = true; + return -EBUSY; + } else if (!card->supports_sdio_new_mode && + !(wr_bitmap & card->mp_data_port_mask)) { return -1; + } if (card->mp_wr_bitmap & (1 << card->curr_wr_port)) { - card->mp_wr_bitmap &= (u16) (~(1 << card->curr_wr_port)); + card->mp_wr_bitmap &= (u32) (~(1 << card->curr_wr_port)); *port = card->curr_wr_port; - if (++card->curr_wr_port == card->mp_end_port) - card->curr_wr_port = 1; + if (((card->supports_sdio_new_mode) && + (++card->curr_wr_port == card->max_ports)) || + ((!card->supports_sdio_new_mode) && + (++card->curr_wr_port == card->mp_end_port))) + card->curr_wr_port = reg->start_wr_port; } else { adapter->data_sent = true; return -EBUSY; } - if (*port == CTRL_PORT) { - dev_err(adapter->dev, "invalid data port=%d cur port=%d" - " mp_wr_bitmap=0x%04x -> 0x%04x\n", + if ((card->has_control_mask) && (*port == CTRL_PORT)) { + dev_err(adapter->dev, + "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", *port, card->curr_wr_port, wr_bitmap, card->mp_wr_bitmap); return -1; } - dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%04x -> 0x%04x\n", + dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", *port, wr_bitmap, card->mp_wr_bitmap); return 0; @@ -564,11 +640,12 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port) static int mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits) { + struct sdio_mmc_card *card = adapter->card; u32 tries; - u32 cs; + u8 cs; for (tries = 0; tries < MAX_POLL_TRIES; tries++) { - if (mwifiex_read_reg(adapter, CARD_STATUS_REG, &cs)) + if (mwifiex_read_reg(adapter, card->reg->poll_reg, &cs)) break; else if ((cs & bits) == bits) return 0; @@ -587,12 +664,14 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits) static int mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) { - u32 fws0, fws1; + struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; + u8 fws0, fws1; - if (mwifiex_read_reg(adapter, CARD_FW_STATUS0_REG, &fws0)) + if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0)) return -1; - if (mwifiex_read_reg(adapter, CARD_FW_STATUS1_REG, &fws1)) + if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1)) return -1; *dat = (u16) ((fws1 << 8) | fws0); @@ -608,14 +687,14 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) */ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter) { - u32 host_int_mask; + u8 host_int_mask, host_int_disable = HOST_INT_DISABLE; /* Read back the host_int_mask register */ if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask)) return -1; /* Update with the mask and write back to the register */ - host_int_mask &= ~HOST_INT_DISABLE; + host_int_mask &= ~host_int_disable; if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) { dev_err(adapter->dev, "disable host interrupt failed\n"); @@ -633,8 +712,11 @@ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter 
*adapter) */ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) { + struct sdio_mmc_card *card = adapter->card; + /* Simply write the mask to the register */ - if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, HOST_INT_ENABLE)) { + if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, + card->reg->host_int_enable)) { dev_err(adapter->dev, "enable host interrupt failed\n"); return -1; } @@ -686,11 +768,13 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter, static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, struct mwifiex_fw_image *fw) { + struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; int ret; u8 *firmware = fw->fw_buf; u32 firmware_len = fw->fw_len; u32 offset = 0; - u32 base0, base1; + u8 base0, base1; u8 *fwbuf; u16 len = 0; u32 txlen, tx_blocks = 0, tries; @@ -727,7 +811,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, break; for (tries = 0; tries < MAX_POLL_TRIES; tries++) { - ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_0, + ret = mwifiex_read_reg(adapter, reg->base_0_reg, &base0); if (ret) { dev_err(adapter->dev, @@ -736,7 +820,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, base0, base0); goto done; } - ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_1, + ret = mwifiex_read_reg(adapter, reg->base_1_reg, &base1); if (ret) { dev_err(adapter->dev, @@ -828,10 +912,11 @@ done: static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) { + struct sdio_mmc_card *card = adapter->card; int ret = 0; u16 firmware_stat; u32 tries; - u32 winner_status; + u8 winner_status; /* Wait for firmware initialization event */ for (tries = 0; tries < poll_num; tries++) { @@ -849,7 +934,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, if (ret) { if (mwifiex_read_reg - (adapter, CARD_FW_STATUS0_REG, &winner_status)) + (adapter, card->reg->status_reg_0, &winner_status)) winner_status = 0; if (winner_status) @@ -866,12 +951,12 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; - u32 sdio_ireg; + u8 sdio_ireg; unsigned long flags; - if (mwifiex_read_data_sync(adapter, card->mp_regs, MAX_MP_REGS, - REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, - 0)) { + if (mwifiex_read_data_sync(adapter, card->mp_regs, + card->reg->max_mp_regs, + REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) { dev_err(adapter->dev, "read mp_regs failed\n"); return; } @@ -880,6 +965,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) if (sdio_ireg) { /* * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS + * For SDIO new mode CMD port interrupts + * DN_LD_CMD_PORT_HOST_INT_STATUS and/or + * UP_LD_CMD_PORT_HOST_INT_STATUS * Clear the interrupt status register */ dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg); @@ -1003,11 +1091,11 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, s32 f_aggr_cur = 0; struct sk_buff *skb_deaggr; u32 pind; - u32 pkt_len, pkt_type = 0; + u32 pkt_len, pkt_type, mport; u8 *curr_ptr; u32 rx_len = skb->len; - if (port == CTRL_PORT) { + if ((card->has_control_mask) && (port == CTRL_PORT)) { /* Read the command Resp without aggr */ dev_dbg(adapter->dev, "info: %s: no aggregation for cmd " "response\n", __func__); @@ -1024,7 +1112,10 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, goto 
rx_curr_single; } - if (card->mp_rd_bitmap & (~((u16) CTRL_PORT_MASK))) { + if ((!card->has_control_mask && (card->mp_rd_bitmap & + card->reg->data_port_mask)) || + (card->has_control_mask && (card->mp_rd_bitmap & + (~((u32) CTRL_PORT_MASK))))) { /* Some more data RX pending */ dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__); @@ -1060,10 +1151,10 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, if (f_aggr_cur) { dev_dbg(adapter->dev, "info: current packet aggregation\n"); /* Curr pkt can be aggregated */ - MP_RX_AGGR_SETUP(card, skb, port); + mp_rx_aggr_setup(card, skb, port); if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) || - MP_RX_AGGR_PORT_LIMIT_REACHED(card)) { + mp_rx_aggr_port_limit_reached(card)) { dev_dbg(adapter->dev, "info: %s: aggregated packet " "limit reached\n", __func__); /* No more pkts allowed in Aggr buf, rx it */ @@ -1076,11 +1167,28 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n", card->mpa_rx.pkt_cnt); + if (card->supports_sdio_new_mode) { + int i; + u32 port_count; + + for (i = 0, port_count = 0; i < card->max_ports; i++) + if (card->mpa_rx.ports & BIT(i)) + port_count++; + + /* Reading data from "start_port + 0" to "start_port + + * port_count -1", so decrease the count by 1 + */ + port_count--; + mport = (adapter->ioport | SDIO_MPA_ADDR_BASE | + (port_count << 8)) + card->mpa_rx.start_port; + } else { + mport = (adapter->ioport | SDIO_MPA_ADDR_BASE | + (card->mpa_rx.ports << 4)) + + card->mpa_rx.start_port; + } + if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf, - card->mpa_rx.buf_len, - (adapter->ioport | 0x1000 | - (card->mpa_rx.ports << 4)) + - card->mpa_rx.start_port, 1)) + card->mpa_rx.buf_len, mport, 1)) goto error; curr_ptr = card->mpa_rx.buf; @@ -1167,6 +1275,7 @@ error: static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; int ret = 0; u8 sdio_ireg; struct sk_buff *skb; @@ -1175,6 +1284,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) u32 rx_blocks; u16 rx_len; unsigned long flags; + u32 bitmap; + u8 cr; spin_lock_irqsave(&adapter->int_lock, flags); sdio_ireg = adapter->int_status; @@ -1184,10 +1295,60 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (!sdio_ireg) return ret; + /* Following interrupt is only for SDIO new mode */ + if (sdio_ireg & DN_LD_CMD_PORT_HOST_INT_STATUS && adapter->cmd_sent) + adapter->cmd_sent = false; + + /* Following interrupt is only for SDIO new mode */ + if (sdio_ireg & UP_LD_CMD_PORT_HOST_INT_STATUS) { + u32 pkt_type; + + /* read the len of control packet */ + rx_len = card->mp_regs[CMD_RD_LEN_1] << 8; + rx_len |= (u16) card->mp_regs[CMD_RD_LEN_0]; + rx_blocks = DIV_ROUND_UP(rx_len, MWIFIEX_SDIO_BLOCK_SIZE); + if (rx_len <= INTF_HEADER_LEN || + (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) > + MWIFIEX_RX_DATA_BUF_SIZE) + return -1; + rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); + + skb = dev_alloc_skb(rx_len); + if (!skb) + return -1; + + skb_put(skb, rx_len); + + if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data, + skb->len, adapter->ioport | + CMD_PORT_SLCT)) { + dev_err(adapter->dev, + "%s: failed to card_to_host", __func__); + dev_kfree_skb_any(skb); + goto term_cmd; + } + + if ((pkt_type != MWIFIEX_TYPE_CMD) && + (pkt_type != MWIFIEX_TYPE_EVENT)) + dev_err(adapter->dev, + "%s:Received wrong packet on cmd 
port", + __func__); + + mwifiex_decode_rx_packet(adapter, skb, pkt_type); + } + if (sdio_ireg & DN_LD_HOST_INT_STATUS) { - card->mp_wr_bitmap = ((u16) card->mp_regs[WR_BITMAP_U]) << 8; - card->mp_wr_bitmap |= (u16) card->mp_regs[WR_BITMAP_L]; - dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%04x\n", + bitmap = (u32) card->mp_regs[reg->wr_bitmap_l]; + bitmap |= ((u32) card->mp_regs[reg->wr_bitmap_u]) << 8; + if (card->supports_sdio_new_mode) { + bitmap |= + ((u32) card->mp_regs[reg->wr_bitmap_1l]) << 16; + bitmap |= + ((u32) card->mp_regs[reg->wr_bitmap_1u]) << 24; + } + card->mp_wr_bitmap = bitmap; + + dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n", card->mp_wr_bitmap); if (adapter->data_sent && (card->mp_wr_bitmap & card->mp_data_port_mask)) { @@ -1200,11 +1361,11 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) /* As firmware will not generate download ready interrupt if the port updated is command port only, cmd_sent should be done for any SDIO interrupt. */ - if (adapter->cmd_sent) { + if (card->has_control_mask && adapter->cmd_sent) { /* Check if firmware has attach buffer at command port and update just that in wr_bit_map. */ card->mp_wr_bitmap |= - (u16) card->mp_regs[WR_BITMAP_L] & CTRL_PORT_MASK; + (u32) card->mp_regs[reg->wr_bitmap_l] & CTRL_PORT_MASK; if (card->mp_wr_bitmap & CTRL_PORT_MASK) adapter->cmd_sent = false; } @@ -1212,9 +1373,16 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n", adapter->cmd_sent, adapter->data_sent); if (sdio_ireg & UP_LD_HOST_INT_STATUS) { - card->mp_rd_bitmap = ((u16) card->mp_regs[RD_BITMAP_U]) << 8; - card->mp_rd_bitmap |= (u16) card->mp_regs[RD_BITMAP_L]; - dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%04x\n", + bitmap = (u32) card->mp_regs[reg->rd_bitmap_l]; + bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8; + if (card->supports_sdio_new_mode) { + bitmap |= + ((u32) card->mp_regs[reg->rd_bitmap_1l]) << 16; + bitmap |= + ((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24; + } + card->mp_rd_bitmap = bitmap; + dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n", card->mp_rd_bitmap); while (true) { @@ -1224,8 +1392,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) "info: no more rd_port available\n"); break; } - len_reg_l = RD_LEN_P0_L + (port << 1); - len_reg_u = RD_LEN_P0_U + (port << 1); + len_reg_l = reg->rd_len_p0_l + (port << 1); + len_reg_u = reg->rd_len_p0_u + (port << 1); rx_len = ((u16) card->mp_regs[len_reg_u]) << 8; rx_len |= (u16) card->mp_regs[len_reg_l]; dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n", @@ -1257,37 +1425,33 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb, port)) { - u32 cr = 0; - dev_err(adapter->dev, "card_to_host_mpa failed:" " int status=%#x\n", sdio_ireg); - if (mwifiex_read_reg(adapter, - CONFIGURATION_REG, &cr)) - dev_err(adapter->dev, - "read CFG reg failed\n"); - - dev_dbg(adapter->dev, - "info: CFG reg val = %d\n", cr); - if (mwifiex_write_reg(adapter, - CONFIGURATION_REG, - (cr | 0x04))) - dev_err(adapter->dev, - "write CFG reg failed\n"); - - dev_dbg(adapter->dev, "info: write success\n"); - if (mwifiex_read_reg(adapter, - CONFIGURATION_REG, &cr)) - dev_err(adapter->dev, - "read CFG reg failed\n"); - - dev_dbg(adapter->dev, - "info: CFG reg val =%x\n", cr); - return -1; + goto term_cmd; } } } return 0; + +term_cmd: + /* terminate cmd */ + if (mwifiex_read_reg(adapter, CONFIGURATION_REG, 
&cr)) + dev_err(adapter->dev, "read CFG reg failed\n"); + else + dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr); + + if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04))) + dev_err(adapter->dev, "write CFG reg failed\n"); + else + dev_dbg(adapter->dev, "info: write success\n"); + + if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr)) + dev_err(adapter->dev, "read CFG reg failed\n"); + else + dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr); + + return -1; } /* @@ -1305,7 +1469,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) * and return. */ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, - u8 *payload, u32 pkt_len, u8 port, + u8 *payload, u32 pkt_len, u32 port, u32 next_pkt_len) { struct sdio_mmc_card *card = adapter->card; @@ -1314,8 +1478,11 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, s32 f_send_cur_buf = 0; s32 f_precopy_cur_buf = 0; s32 f_postcopy_cur_buf = 0; + u32 mport; - if ((!card->mpa_tx.enabled) || (port == CTRL_PORT)) { + if (!card->mpa_tx.enabled || + (card->has_control_mask && (port == CTRL_PORT)) || + (card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) { dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n", __func__); @@ -1329,7 +1496,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, __func__); if (MP_TX_AGGR_IN_PROGRESS(card)) { - if (!MP_TX_AGGR_PORT_LIMIT_REACHED(card) && + if (!mp_tx_aggr_port_limit_reached(card) && MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) { f_precopy_cur_buf = 1; @@ -1342,7 +1509,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, /* No room in Aggr buf, send it */ f_send_aggr_buf = 1; - if (MP_TX_AGGR_PORT_LIMIT_REACHED(card) || + if (mp_tx_aggr_port_limit_reached(card) || !(card->mp_wr_bitmap & (1 << card->curr_wr_port))) f_send_cur_buf = 1; @@ -1381,7 +1548,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port); if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) || - MP_TX_AGGR_PORT_LIMIT_REACHED(card)) + mp_tx_aggr_port_limit_reached(card)) /* No more pkts allowed in Aggr buf, send it */ f_send_aggr_buf = 1; } @@ -1390,11 +1557,28 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n", __func__, card->mpa_tx.start_port, card->mpa_tx.ports); + if (card->supports_sdio_new_mode) { + u32 port_count; + int i; + + for (i = 0, port_count = 0; i < card->max_ports; i++) + if (card->mpa_tx.ports & BIT(i)) + port_count++; + + /* Writing data from "start_port + 0" to "start_port + + * port_count -1", so decrease the count by 1 + */ + port_count--; + mport = (adapter->ioport | SDIO_MPA_ADDR_BASE | + (port_count << 8)) + card->mpa_tx.start_port; + } else { + mport = (adapter->ioport | SDIO_MPA_ADDR_BASE | + (card->mpa_tx.ports << 4)) + + card->mpa_tx.start_port; + } + ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf, - card->mpa_tx.buf_len, - (adapter->ioport | 0x1000 | - (card->mpa_tx.ports << 4)) + - card->mpa_tx.start_port); + card->mpa_tx.buf_len, mport); MP_TX_AGGR_BUF_RESET(card); } @@ -1434,7 +1618,7 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, int ret; u32 buf_block_len; u32 blk_size; - u8 port = CTRL_PORT; + u32 port = CTRL_PORT; u8 *payload = (u8 *)skb->data; u32 pkt_len = skb->len; @@ -1465,6 +1649,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, pkt_len > MWIFIEX_UPLD_SIZE) 
dev_err(adapter->dev, "%s: payload=%p, nb=%d\n", __func__, payload, pkt_len); + + if (card->supports_sdio_new_mode) + port = CMD_PORT_SLCT; } /* Transfer data to card */ @@ -1586,18 +1773,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) adapter->dev = &func->dev; - switch (func->device) { - case SDIO_DEVICE_ID_MARVELL_8786: - strcpy(adapter->fw_name, SD8786_DEFAULT_FW_NAME); - break; - case SDIO_DEVICE_ID_MARVELL_8797: - strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME); - break; - case SDIO_DEVICE_ID_MARVELL_8787: - default: - strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME); - break; - } + strcpy(adapter->fw_name, card->firmware); return 0; @@ -1626,8 +1802,9 @@ disable_func: static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; int ret; - u32 sdio_ireg; + u8 sdio_ireg; /* * Read the HOST_INT_STATUS_REG for ACK the first interrupt got @@ -1645,30 +1822,35 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) /* Initialize SDIO variables in card */ card->mp_rd_bitmap = 0; card->mp_wr_bitmap = 0; - card->curr_rd_port = 1; - card->curr_wr_port = 1; + card->curr_rd_port = reg->start_rd_port; + card->curr_wr_port = reg->start_wr_port; - card->mp_data_port_mask = DATA_PORT_MASK; + card->mp_data_port_mask = reg->data_port_mask; card->mpa_tx.buf_len = 0; card->mpa_tx.pkt_cnt = 0; card->mpa_tx.start_port = 0; card->mpa_tx.enabled = 1; - card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT; + card->mpa_tx.pkt_aggr_limit = card->mp_agg_pkt_limit; card->mpa_rx.buf_len = 0; card->mpa_rx.pkt_cnt = 0; card->mpa_rx.start_port = 0; card->mpa_rx.enabled = 1; - card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT; + card->mpa_rx.pkt_aggr_limit = card->mp_agg_pkt_limit; /* Allocate buffers for SDIO MP-A */ - card->mp_regs = kzalloc(MAX_MP_REGS, GFP_KERNEL); + card->mp_regs = kzalloc(reg->max_mp_regs, GFP_KERNEL); if (!card->mp_regs) return -ENOMEM; + /* Allocate skb pointer buffers */ + card->mpa_rx.skb_arr = kzalloc((sizeof(void *)) * + card->mp_agg_pkt_limit, GFP_KERNEL); + card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) * + card->mp_agg_pkt_limit, GFP_KERNEL); ret = mwifiex_alloc_sdio_mpa_buffers(adapter, SDIO_MP_TX_AGGR_DEF_BUF_SIZE, SDIO_MP_RX_AGGR_DEF_BUF_SIZE); @@ -1705,6 +1887,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter) struct sdio_mmc_card *card = adapter->card; kfree(card->mp_regs); + kfree(card->mpa_rx.skb_arr); + kfree(card->mpa_rx.len_arr); kfree(card->mpa_tx.buf); kfree(card->mpa_rx.buf); } @@ -1716,16 +1900,20 @@ static void mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port) { struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; int i; card->mp_end_port = port; - card->mp_data_port_mask = DATA_PORT_MASK; + card->mp_data_port_mask = reg->data_port_mask; - for (i = 1; i <= MAX_PORT - card->mp_end_port; i++) - card->mp_data_port_mask &= ~(1 << (MAX_PORT - i)); + if (reg->start_wr_port) { + for (i = 1; i <= card->max_ports - card->mp_end_port; i++) + card->mp_data_port_mask &= + ~(1 << (card->max_ports - i)); + } - card->curr_wr_port = 1; + card->curr_wr_port = reg->start_wr_port; dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n", port, card->mp_data_port_mask); @@ -1831,3 +2019,4 @@ MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); 
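One detail of the sdio.c changes above worth spelling out is the aggregated-transfer address used by both the rx and tx aggregation paths: in the SD8897 "new mode", the CMD53 address packs the function's I/O port, the SDIO_MPA_ADDR_BASE flag, the number of aggregated ports minus one (in bits 8 and up), and the starting port. The stand-alone model below is a sketch with a worked value, not driver code; the helper name is invented here, and the two constants mirror the defines this patch adds to sdio.h.

#include <stdint.h>
#include <stdio.h>

#define SDIO_MPA_ADDR_BASE	0x1000	/* as added to sdio.h */
#define MEM_PORT		0x10000	/* new-mode ioport (SD8897) */

/* Illustrative model of the new-mode aggregate address computation */
static uint32_t mpa_new_mode_addr(uint32_t ioport, uint32_t ports_bitmap,
				  uint8_t start_port, int max_ports)
{
	uint32_t port_count = 0;
	int i;

	for (i = 0; i < max_ports; i++)
		if (ports_bitmap & (1u << i))
			port_count++;

	/* The transfer covers start_port .. start_port + port_count - 1,
	 * so the encoded count is one less than the ports aggregated. */
	port_count--;

	return (ioport | SDIO_MPA_ADDR_BASE | (port_count << 8)) + start_port;
}

int main(void)
{
	/* four ports (2, 3, 4 and 5) starting at port 2 -> 0x11302 */
	printf("0x%x\n", mpa_new_mode_addr(MEM_PORT, 0x3c, 2, 32));
	return 0;
}

The pre-8897 chips instead OR in the raw port bitmap shifted left by 4, as the else branches in the aggregation code show, which is why the old ports fields were only sixteen bits wide.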
+MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h index 8cc5468654b4..6d51dfdd8251 100644 --- a/drivers/net/wireless/mwifiex/sdio.h +++ b/drivers/net/wireless/mwifiex/sdio.h @@ -32,30 +32,37 @@ #define SD8786_DEFAULT_FW_NAME "mrvl/sd8786_uapsta.bin" #define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin" #define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin" +#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin" #define BLOCK_MODE 1 #define BYTE_MODE 0 #define REG_PORT 0 -#define RD_BITMAP_L 0x04 -#define RD_BITMAP_U 0x05 -#define WR_BITMAP_L 0x06 -#define WR_BITMAP_U 0x07 -#define RD_LEN_P0_L 0x08 -#define RD_LEN_P0_U 0x09 #define MWIFIEX_SDIO_IO_PORT_MASK 0xfffff #define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000 +#define SDIO_MPA_ADDR_BASE 0x1000 #define CTRL_PORT 0 #define CTRL_PORT_MASK 0x0001 -#define DATA_PORT_MASK 0xfffe -#define MAX_MP_REGS 64 -#define MAX_PORT 16 - -#define SDIO_MP_AGGR_DEF_PKT_LIMIT 8 +#define CMD_PORT_UPLD_INT_MASK (0x1U<<6) +#define CMD_PORT_DNLD_INT_MASK (0x1U<<7) +#define HOST_TERM_CMD53 (0x1U << 2) +#define REG_PORT 0 +#define MEM_PORT 0x10000 +#define CMD_RD_LEN_0 0xB4 +#define CMD_RD_LEN_1 0xB5 +#define CARD_CONFIG_2_1_REG 0xCD +#define CMD53_NEW_MODE (0x1U << 0) +#define CMD_CONFIG_0 0xB8 +#define CMD_PORT_RD_LEN_EN (0x1U << 2) +#define CMD_CONFIG_1 0xB9 +#define CMD_PORT_AUTO_EN (0x1U << 0) +#define CMD_PORT_SLCT 0x8000 +#define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U) +#define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U) #define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */ @@ -75,14 +82,8 @@ /* Host Control Registers : Configuration */ #define CONFIGURATION_REG 0x00 -/* Host Control Registers : Host without Command 53 finish host*/ -#define HOST_TO_CARD_EVENT (0x1U << 3) -/* Host Control Registers : Host without Command 53 finish host */ -#define HOST_WO_CMD53_FINISH_HOST (0x1U << 2) /* Host Control Registers : Host power up */ #define HOST_POWER_UP (0x1U << 1) -/* Host Control Registers : Host power down */ -#define HOST_POWER_DOWN (0x1U << 0) /* Host Control Registers : Host interrupt mask */ #define HOST_INT_MASK_REG 0x02 @@ -90,8 +91,7 @@ #define UP_LD_HOST_INT_MASK (0x1U) /* Host Control Registers : Download host interrupt mask */ #define DN_LD_HOST_INT_MASK (0x2U) -/* Enable Host interrupt mask */ -#define HOST_INT_ENABLE (UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK) + /* Disable Host interrupt mask */ #define HOST_INT_DISABLE 0xff @@ -104,74 +104,15 @@ /* Host Control Registers : Host interrupt RSR */ #define HOST_INT_RSR_REG 0x01 -/* Host Control Registers : Upload host interrupt RSR */ -#define UP_LD_HOST_INT_RSR (0x1U) -#define SDIO_INT_MASK 0x3F /* Host Control Registers : Host interrupt status */ #define HOST_INT_STATUS_REG 0x28 -/* Host Control Registers : Upload CRC error */ -#define UP_LD_CRC_ERR (0x1U << 2) -/* Host Control Registers : Upload restart */ -#define UP_LD_RESTART (0x1U << 1) -/* Host Control Registers : Download restart */ -#define DN_LD_RESTART (0x1U << 0) - -/* Card Control Registers : Card status register */ -#define CARD_STATUS_REG 0x30 + /* Card Control Registers : Card I/O ready */ #define CARD_IO_READY (0x1U << 3) -/* Card Control Registers : CIS card ready */ -#define CIS_CARD_RDY (0x1U << 2) -/* Card Control Registers : Upload card ready */ -#define UP_LD_CARD_RDY (0x1U << 1) /* Card Control Registers : Download card ready */ #define DN_LD_CARD_RDY (0x1U << 0) -/* Card Control Registers : Host interrupt mask register */ -#define 
HOST_INTERRUPT_MASK_REG 0x34 -/* Card Control Registers : Host power interrupt mask */ -#define HOST_POWER_INT_MASK (0x1U << 3) -/* Card Control Registers : Abort card interrupt mask */ -#define ABORT_CARD_INT_MASK (0x1U << 2) -/* Card Control Registers : Upload card interrupt mask */ -#define UP_LD_CARD_INT_MASK (0x1U << 1) -/* Card Control Registers : Download card interrupt mask */ -#define DN_LD_CARD_INT_MASK (0x1U << 0) - -/* Card Control Registers : Card interrupt status register */ -#define CARD_INTERRUPT_STATUS_REG 0x38 -/* Card Control Registers : Power up interrupt */ -#define POWER_UP_INT (0x1U << 4) -/* Card Control Registers : Power down interrupt */ -#define POWER_DOWN_INT (0x1U << 3) - -/* Card Control Registers : Card interrupt RSR register */ -#define CARD_INTERRUPT_RSR_REG 0x3c -/* Card Control Registers : Power up RSR */ -#define POWER_UP_RSR (0x1U << 4) -/* Card Control Registers : Power down RSR */ -#define POWER_DOWN_RSR (0x1U << 3) - -/* Card Control Registers : Miscellaneous Configuration Register */ -#define CARD_MISC_CFG_REG 0x6C - -/* Host F1 read base 0 */ -#define HOST_F1_RD_BASE_0 0x0040 -/* Host F1 read base 1 */ -#define HOST_F1_RD_BASE_1 0x0041 -/* Host F1 card ready */ -#define HOST_F1_CARD_RDY 0x0020 - -/* Firmware status 0 register */ -#define CARD_FW_STATUS0_REG 0x60 -/* Firmware status 1 register */ -#define CARD_FW_STATUS1_REG 0x61 -/* Rx length register */ -#define CARD_RX_LEN_REG 0x62 -/* Rx unit register */ -#define CARD_RX_UNIT_REG 0x63 - /* Max retry number of CMD53 write */ #define MAX_WRITE_IOMEM_RETRY 2 @@ -192,7 +133,8 @@ if (a->mpa_tx.start_port <= port) \ a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt)); \ else \ - a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \ + a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+ \ + (a->max_ports - \ a->mp_end_port))); \ a->mpa_tx.pkt_cnt++; \ } while (0) @@ -201,12 +143,6 @@ #define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \ (a->mpa_tx.pkt_cnt == a->mpa_tx.pkt_aggr_limit) -/* SDIO Tx aggregation port limit ? */ -#define MP_TX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_wr_port < \ - a->mpa_tx.start_port) && (((MAX_PORT - \ - a->mpa_tx.start_port) + a->curr_wr_port) >= \ - SDIO_MP_AGGR_DEF_PKT_LIMIT)) - /* Reset SDIO Tx aggregation buffer parameters */ #define MP_TX_AGGR_BUF_RESET(a) do { \ a->mpa_tx.pkt_cnt = 0; \ @@ -219,12 +155,6 @@ #define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \ (a->mpa_rx.pkt_cnt == a->mpa_rx.pkt_aggr_limit) -/* SDIO Tx aggregation port limit ? */ -#define MP_RX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_rd_port < \ - a->mpa_rx.start_port) && (((MAX_PORT - \ - a->mpa_rx.start_port) + a->curr_rd_port) >= \ - SDIO_MP_AGGR_DEF_PKT_LIMIT)) - /* SDIO Rx aggregation in progress ? 
*/ #define MP_RX_AGGR_IN_PROGRESS(a) (a->mpa_rx.pkt_cnt > 0) @@ -232,20 +162,6 @@ #define MP_RX_AGGR_BUF_HAS_ROOM(a, rx_len) \ ((a->mpa_rx.buf_len+rx_len) <= a->mpa_rx.buf_size) -/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */ -#define MP_RX_AGGR_SETUP(a, skb, port) do { \ - a->mpa_rx.buf_len += skb->len; \ - if (!a->mpa_rx.pkt_cnt) \ - a->mpa_rx.start_port = port; \ - if (a->mpa_rx.start_port <= port) \ - a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt)); \ - else \ - a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt+1)); \ - a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \ - a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \ - a->mpa_rx.pkt_cnt++; \ -} while (0) - /* Reset SDIO Rx aggregation buffer parameters */ #define MP_RX_AGGR_BUF_RESET(a) do { \ a->mpa_rx.pkt_cnt = 0; \ @@ -254,14 +170,13 @@ a->mpa_rx.start_port = 0; \ } while (0) - /* data structure for SDIO MPA TX */ struct mwifiex_sdio_mpa_tx { /* multiport tx aggregation buffer pointer */ u8 *buf; u32 buf_len; u32 pkt_cnt; - u16 ports; + u32 ports; u16 start_port; u8 enabled; u32 buf_size; @@ -272,11 +187,11 @@ struct mwifiex_sdio_mpa_rx { u8 *buf; u32 buf_len; u32 pkt_cnt; - u16 ports; + u32 ports; u16 start_port; - struct sk_buff *skb_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT]; - u32 len_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT]; + struct sk_buff **skb_arr; + u32 *len_arr; u8 enabled; u32 buf_size; @@ -286,15 +201,47 @@ struct mwifiex_sdio_mpa_rx { int mwifiex_bus_register(void); void mwifiex_bus_unregister(void); +struct mwifiex_sdio_card_reg { + u8 start_rd_port; + u8 start_wr_port; + u8 base_0_reg; + u8 base_1_reg; + u8 poll_reg; + u8 host_int_enable; + u8 status_reg_0; + u8 status_reg_1; + u8 sdio_int_mask; + u32 data_port_mask; + u8 max_mp_regs; + u8 rd_bitmap_l; + u8 rd_bitmap_u; + u8 rd_bitmap_1l; + u8 rd_bitmap_1u; + u8 wr_bitmap_l; + u8 wr_bitmap_u; + u8 wr_bitmap_1l; + u8 wr_bitmap_1u; + u8 rd_len_p0_l; + u8 rd_len_p0_u; + u8 card_misc_cfg_reg; +}; + struct sdio_mmc_card { struct sdio_func *func; struct mwifiex_adapter *adapter; - u16 mp_rd_bitmap; - u16 mp_wr_bitmap; + const char *firmware; + const struct mwifiex_sdio_card_reg *reg; + u8 max_ports; + u8 mp_agg_pkt_limit; + bool supports_sdio_new_mode; + bool has_control_mask; + + u32 mp_rd_bitmap; + u32 mp_wr_bitmap; u16 mp_end_port; - u16 mp_data_port_mask; + u32 mp_data_port_mask; u8 curr_rd_port; u8 curr_wr_port; @@ -305,6 +252,98 @@ struct sdio_mmc_card { struct mwifiex_sdio_mpa_rx mpa_rx; }; +struct mwifiex_sdio_device { + const char *firmware; + const struct mwifiex_sdio_card_reg *reg; + u8 max_ports; + u8 mp_agg_pkt_limit; + bool supports_sdio_new_mode; + bool has_control_mask; +}; + +static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = { + .start_rd_port = 1, + .start_wr_port = 1, + .base_0_reg = 0x0040, + .base_1_reg = 0x0041, + .poll_reg = 0x30, + .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK, + .status_reg_0 = 0x60, + .status_reg_1 = 0x61, + .sdio_int_mask = 0x3f, + .data_port_mask = 0x0000fffe, + .max_mp_regs = 64, + .rd_bitmap_l = 0x04, + .rd_bitmap_u = 0x05, + .wr_bitmap_l = 0x06, + .wr_bitmap_u = 0x07, + .rd_len_p0_l = 0x08, + .rd_len_p0_u = 0x09, + .card_misc_cfg_reg = 0x6c, +}; + +static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = { + .start_rd_port = 0, + .start_wr_port = 0, + .base_0_reg = 0x60, + .base_1_reg = 0x61, + .poll_reg = 0x50, + .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK | + CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK, + .status_reg_0 = 0xc0, + .status_reg_1 = 0xc1, + .sdio_int_mask = 
0xff, + .data_port_mask = 0xffffffff, + .max_mp_regs = 184, + .rd_bitmap_l = 0x04, + .rd_bitmap_u = 0x05, + .rd_bitmap_1l = 0x06, + .rd_bitmap_1u = 0x07, + .wr_bitmap_l = 0x08, + .wr_bitmap_u = 0x09, + .wr_bitmap_1l = 0x0a, + .wr_bitmap_1u = 0x0b, + .rd_len_p0_l = 0x0c, + .rd_len_p0_u = 0x0d, + .card_misc_cfg_reg = 0xcc, +}; + +static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = { + .firmware = SD8786_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd87xx, + .max_ports = 16, + .mp_agg_pkt_limit = 8, + .supports_sdio_new_mode = false, + .has_control_mask = true, +}; + +static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { + .firmware = SD8787_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd87xx, + .max_ports = 16, + .mp_agg_pkt_limit = 8, + .supports_sdio_new_mode = false, + .has_control_mask = true, +}; + +static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { + .firmware = SD8797_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd87xx, + .max_ports = 16, + .mp_agg_pkt_limit = 8, + .supports_sdio_new_mode = false, + .has_control_mask = true, +}; + +static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { + .firmware = SD8897_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd8897, + .max_ports = 32, + .mp_agg_pkt_limit = 16, + .supports_sdio_new_mode = true, + .has_control_mask = false, +}; + /* * .cmdrsp_complete handler */ @@ -325,4 +364,77 @@ static inline int mwifiex_sdio_event_complete(struct mwifiex_adapter *adapter, return 0; } +static inline bool +mp_rx_aggr_port_limit_reached(struct sdio_mmc_card *card) +{ + u8 tmp; + + if (card->curr_rd_port < card->mpa_rx.start_port) { + if (card->supports_sdio_new_mode) + tmp = card->mp_end_port >> 1; + else + tmp = card->mp_agg_pkt_limit; + + if (((card->max_ports - card->mpa_rx.start_port) + + card->curr_rd_port) >= tmp) + return true; + } + + if (!card->supports_sdio_new_mode) + return false; + + if ((card->curr_rd_port - card->mpa_rx.start_port) >= + (card->mp_end_port >> 1)) + return true; + + return false; +} + +static inline bool +mp_tx_aggr_port_limit_reached(struct sdio_mmc_card *card) +{ + u16 tmp; + + if (card->curr_wr_port < card->mpa_tx.start_port) { + if (card->supports_sdio_new_mode) + tmp = card->mp_end_port >> 1; + else + tmp = card->mp_agg_pkt_limit; + + if (((card->max_ports - card->mpa_tx.start_port) + + card->curr_wr_port) >= tmp) + return true; + } + + if (!card->supports_sdio_new_mode) + return false; + + if ((card->curr_wr_port - card->mpa_tx.start_port) >= + (card->mp_end_port >> 1)) + return true; + + return false; +} + +/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */ +static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card, + struct sk_buff *skb, u8 port) +{ + card->mpa_rx.buf_len += skb->len; + + if (!card->mpa_rx.pkt_cnt) + card->mpa_rx.start_port = port; + + if (card->supports_sdio_new_mode) { + card->mpa_rx.ports |= (1 << port); + } else { + if (card->mpa_rx.start_port <= port) + card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt); + else + card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1); + } + card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = skb; + card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = skb->len; + card->mpa_rx.pkt_cnt++; +} #endif /* _MWIFIEX_SDIO_H */ diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index b193e25977d2..8ece48580642 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -1134,6 +1134,55 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv, return 0; } +/* This 
function parses cal data from ASCII to hex */ +static u32 mwifiex_parse_cal_cfg(u8 *src, size_t len, u8 *dst) +{ + u8 *s = src, *d = dst; + + while (s - src < len) { + if (*s && (isspace(*s) || *s == '\t')) { + s++; + continue; + } + if (isxdigit(*s)) { + *d++ = simple_strtol(s, NULL, 16); + s += 2; + } else { + s++; + } + } + + return d - dst; +} + +/* This function prepares the command of set_cfg_data. */ +static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + u16 cmd_action) +{ + struct host_cmd_ds_802_11_cfg_data *cfg_data = &cmd->params.cfg_data; + struct mwifiex_adapter *adapter = priv->adapter; + u32 len, cal_data_offset; + u8 *tmp_cmd = (u8 *)cmd; + + cal_data_offset = S_DS_GEN + sizeof(*cfg_data); + if ((adapter->cal_data->data) && (adapter->cal_data->size > 0)) + len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data, + adapter->cal_data->size, + (u8 *)(tmp_cmd + cal_data_offset)); + else + return -1; + + cfg_data->action = cpu_to_le16(cmd_action); + cfg_data->type = cpu_to_le16(CFG_DATA_TYPE_CAL); + cfg_data->data_len = cpu_to_le16(len); + + cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA); + cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*cfg_data) + len); + + return 0; +} + /* * This function prepares the commands before sending them to the firmware. * @@ -1152,6 +1201,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_GET_HW_SPEC: ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr); break; + case HostCmd_CMD_CFG_DATA: + ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, cmd_action); + break; case HostCmd_CMD_MAC_CONTROL: ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action, data_buf); @@ -1384,6 +1436,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, */ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta) { + struct mwifiex_adapter *adapter = priv->adapter; int ret; u16 enable = true; struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl; @@ -1404,6 +1457,15 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta) HostCmd_ACT_GEN_SET, 0, NULL); if (ret) return -1; + + /* Download calibration data to firmware */ + if (adapter->cal_data) { + ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA, + HostCmd_ACT_GEN_SET, 0, NULL); + if (ret) + return -1; + } + /* Read MAC address from HW */ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC, HostCmd_ACT_GEN_GET, 0, NULL); diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 9f990e14966e..d85df158cc6c 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -818,6 +818,18 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv, return 0; } +/* This function handles the command response of set_cfg_data */ +static int mwifiex_ret_cfg_data(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp) +{ + if (resp->result != HostCmd_RESULT_OK) { + dev_err(priv->adapter->dev, "Cal data cmd resp failed\n"); + return -1; + } + + return 0; +} + /* * This function handles the command responses.
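For readers who want to exercise the cal-data conversion above in isolation: the following is a hedged, stand-alone user-space sketch of the same ASCII-to-binary transformation performed by mwifiex_parse_cal_cfg(). The function name parse_cal_ascii, the main() harness, and the assumption that the calibration file holds whitespace-separated hex byte pairs are illustrative only, not part of the driver.

/* Stand-alone sketch (not driver code): converts ASCII hex byte pairs
 * such as "01 1a ff" into raw bytes, mirroring the skip-separators,
 * consume-two-hex-digits loop of mwifiex_parse_cal_cfg() above.
 * Assumes src is NUL-terminated so the s[1] look-ahead stays in bounds.
 */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t parse_cal_ascii(const char *src, size_t len, unsigned char *dst)
{
	const char *s = src;
	unsigned char *d = dst;

	while ((size_t)(s - src) < len) {
		if (isxdigit((unsigned char)s[0]) &&
		    isxdigit((unsigned char)s[1])) {
			char pair[3] = { s[0], s[1], '\0' };

			/* one output byte per two hex digits */
			*d++ = (unsigned char)strtol(pair, NULL, 16);
			s += 2;
		} else {
			s++; /* skip spaces, tabs, newlines, stray bytes */
		}
	}

	return d - dst; /* number of binary bytes produced */
}

int main(void)
{
	const char ascii[] = "01 1a ff\n07";
	unsigned char bin[8];
	size_t i, n = parse_cal_ascii(ascii, strlen(ascii), bin);

	for (i = 0; i < n; i++)
		printf("%02x ", bin[i]); /* prints: 01 1a ff 07 */
	printf("\n");

	return 0;
}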
* @@ -841,6 +853,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, case HostCmd_CMD_GET_HW_SPEC: ret = mwifiex_ret_get_hw_spec(priv, resp); break; + case HostCmd_CMD_CFG_DATA: + ret = mwifiex_ret_cfg_data(priv, resp); + break; case HostCmd_CMD_MAC_CONTROL: break; case HostCmd_CMD_802_11_MAC_ADDRESS: @@ -978,6 +993,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, case HostCmd_CMD_UAP_BSS_STOP: priv->bss_started = 0; break; + case HostCmd_CMD_UAP_STA_DEAUTH: + break; case HostCmd_CMD_MEF_CFG: break; default: diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 1a8a19dbd635..23aa910bc5d0 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -104,16 +104,14 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, } else { priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; - if (mcast_list->num_multicast_addr) { - dev_dbg(priv->adapter->dev, - "info: Set multicast list=%d\n", - mcast_list->num_multicast_addr); - /* Send multicast addresses to firmware */ - ret = mwifiex_send_cmd_async(priv, - HostCmd_CMD_MAC_MULTICAST_ADR, - HostCmd_ACT_GEN_SET, 0, - mcast_list); - } + dev_dbg(priv->adapter->dev, + "info: Set multicast list=%d\n", + mcast_list->num_multicast_addr); + /* Send multicast addresses to firmware */ + ret = mwifiex_send_cmd_async(priv, + HostCmd_CMD_MAC_MULTICAST_ADR, + HostCmd_ACT_GEN_SET, 0, + mcast_list); } } dev_dbg(priv->adapter->dev, diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index b04b1db29100..2de882dead0f 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -689,6 +689,23 @@ mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action, return 0; } +/* This function prepares AP specific deauth command with mac supplied in + * function parameter. + */ +static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, u8 *mac) +{ + struct host_cmd_ds_sta_deauth *sta_deauth = &cmd->params.sta_deauth; + + cmd->command = cpu_to_le16(HostCmd_CMD_UAP_STA_DEAUTH); + memcpy(sta_deauth->mac, mac, ETH_ALEN); + sta_deauth->reason = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING); + + cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_sta_deauth) + + S_DS_GEN); + return 0; +} + /* This function prepares the AP specific commands before sending them * to the firmware. 
* This is a generic function which calls specific command preparation @@ -710,6 +727,10 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no, cmd->command = cpu_to_le16(cmd_no); cmd->size = cpu_to_le16(S_DS_GEN); break; + case HostCmd_CMD_UAP_STA_DEAUTH: + if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf)) + return -1; + break; default: dev_err(priv->adapter->dev, "PREP_CMD: unknown cmd %#x\n", cmd_no); diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c index 21c640d3b579..718066577c6c 100644 --- a/drivers/net/wireless/mwifiex/uap_event.c +++ b/drivers/net/wireless/mwifiex/uap_event.c @@ -107,18 +107,15 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies, */ static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac) { - struct mwifiex_sta_node *node, *tmp; + struct mwifiex_sta_node *node; unsigned long flags; spin_lock_irqsave(&priv->sta_list_spinlock, flags); node = mwifiex_get_sta_entry(priv, mac); if (node) { - list_for_each_entry_safe(node, tmp, &priv->sta_list, - list) { - list_del(&node->list); - kfree(node); - } + list_del(&node->list); + kfree(node); } spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); @@ -295,3 +292,19 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) return 0; } + +/* This function deletes station entry from associated station list. + * Also if both AP and STA are 11n enabled, RxReorder tables and TxBA stream + * tables created for this station are deleted. + */ +void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, + struct mwifiex_sta_node *node) +{ + if (priv->ap_11n_enabled && node->is_11n_enabled) { + mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, node->mac_addr); + mwifiex_del_tx_ba_stream_tbl_by_ra(priv, node->mac_addr); + } + mwifiex_del_sta_entry(priv, node->mac_addr); + + return; +} diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 4be3d33ceae8..944e8846f6fc 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -37,6 +37,9 @@ /* Offset for TOS field in the IP header */ #define IPTOS_OFFSET 5 +static bool enable_tx_amsdu; +module_param(enable_tx_amsdu, bool, 0644); + /* WMM information IE */ static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07, 0x00, 0x50, 0xf2, 0x02, @@ -1233,7 +1236,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) mwifiex_send_delba(priv, tid_del, ra, 1); } } - if (mwifiex_is_amsdu_allowed(priv, tid) && + if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && mwifiex_is_11n_aggragation_possible(priv, ptr, adapter->tx_buf_size)) mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 6820fce4016b..a3707fd4ef62 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -1548,7 +1548,7 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) if (!priv->pending_tx_pkts) return 0; - retry = 0; + retry = 1; rc = 0; spin_lock_bh(&priv->tx_lock); @@ -1572,13 +1572,19 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) spin_lock_bh(&priv->tx_lock); - if (timeout) { + if (timeout || !priv->pending_tx_pkts) { WARN_ON(priv->pending_tx_pkts); if (retry) wiphy_notice(hw->wiphy, "tx rings drained\n"); break; } + if (retry) { + mwl8k_tx_start(priv); + retry = 0; + continue; + } + if (priv->pending_tx_pkts < oldcount) { wiphy_notice(hw->wiphy, "waiting for tx rings to drain (%d -> %d pkts)\n", @@ -2055,6 +2061,7 @@ 
mwl8k_txq_xmit(struct ieee80211_hw *hw, mwl8k_remove_stream(hw, stream); spin_unlock(&priv->stream_lock); } + mwl8k_tx_start(priv); spin_unlock_bh(&priv->tx_lock); pci_unmap_single(priv->pdev, dma, skb->len, PCI_DMA_TODEVICE); diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index 1f9cb55c3360..bdfe637953f4 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@ -881,7 +881,8 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv, if (!upriv->udev) { dbg("Device disconnected"); - return -ENODEV; + retval = -ENODEV; + goto exit; } if (upriv->read_urb->status != -EINPROGRESS) diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 978e7eb26567..7fc46f26cf2b 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -42,8 +42,7 @@ MODULE_FIRMWARE("3826.arm"); -/* - * gpios should be handled in board files and provided via platform data, +/* gpios should be handled in board files and provided via platform data, * but because it's currently impossible for p54spi to have a header file * in include/linux, let's use module paramaters for now */ @@ -191,8 +190,7 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev) const struct firmware *eeprom; int ret; - /* - * allow users to customize their eeprom. + /* allow users to customize their eeprom. */ ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev); @@ -285,8 +283,7 @@ static void p54spi_power_on(struct p54s_priv *priv) gpio_set_value(p54spi_gpio_power, 1); enable_irq(gpio_to_irq(p54spi_gpio_irq)); - /* - * need to wait a while before device can be accessed, the length + /* need to wait a while before device can be accessed, the length * is just a guess */ msleep(10); @@ -365,7 +362,8 @@ static int p54spi_rx(struct p54s_priv *priv) /* Firmware may insert up to 4 padding bytes after the lmac header, * but it does not amend the size of SPI data transfer. * Such packets has correct data size in header, thus referencing - * past the end of allocated skb. Reserve extra 4 bytes for this case */ + * past the end of allocated skb. 
Reserve extra 4 bytes for this case + */ skb = dev_alloc_skb(len + 4); if (!skb) { p54spi_sleep(priv); @@ -383,7 +381,8 @@ static int p54spi_rx(struct p54s_priv *priv) } p54spi_sleep(priv); /* Put additional bytes to compensate for the possible - * alignment-caused truncation */ + * alignment-caused truncation + */ skb_put(skb, 4); if (p54_rx(priv->hw, skb) == 0) @@ -713,27 +712,7 @@ static struct spi_driver p54spi_driver = { .remove = p54spi_remove, }; -static int __init p54spi_init(void) -{ - int ret; - - ret = spi_register_driver(&p54spi_driver); - if (ret < 0) { - printk(KERN_ERR "failed to register SPI driver: %d", ret); - goto out; - } - -out: - return ret; -} - -static void __exit p54spi_exit(void) -{ - spi_unregister_driver(&p54spi_driver); -} - -module_init(p54spi_init); -module_exit(p54spi_exit); +module_spi_driver(p54spi_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>"); diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index f7143733d7e9..3d53a09da5a1 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -1767,33 +1767,45 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { .config = rt2400pci_config, }; -static const struct data_queue_desc rt2400pci_queue_rx = { - .entry_num = 24, - .data_size = DATA_FRAME_SIZE, - .desc_size = RXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; +static void rt2400pci_queue_init(struct data_queue *queue) +{ + switch (queue->qid) { + case QID_RX: + queue->limit = 24; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = RXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; -static const struct data_queue_desc rt2400pci_queue_tx = { - .entry_num = 24, - .data_size = DATA_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; + case QID_AC_VO: + case QID_AC_VI: + case QID_AC_BE: + case QID_AC_BK: + queue->limit = 24; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; -static const struct data_queue_desc rt2400pci_queue_bcn = { - .entry_num = 1, - .data_size = MGMT_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; + case QID_BEACON: + queue->limit = 1; + queue->data_size = MGMT_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; -static const struct data_queue_desc rt2400pci_queue_atim = { - .entry_num = 8, - .data_size = DATA_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; + case QID_ATIM: + queue->limit = 8; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; + + default: + BUG(); + break; + } +} static const struct rt2x00_ops rt2400pci_ops = { .name = KBUILD_MODNAME, @@ -1801,11 +1813,7 @@ static const struct rt2x00_ops rt2400pci_ops = { .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, - .extra_tx_headroom = 0, - .rx = &rt2400pci_queue_rx, - .tx = &rt2400pci_queue_tx, - .bcn = &rt2400pci_queue_bcn, - .atim = &rt2400pci_queue_atim, + .queue_init = rt2400pci_queue_init, .lib = &rt2400pci_rt2x00_ops, .hw = &rt2400pci_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c 
b/drivers/net/wireless/rt2x00/rt2500pci.c index 77e45b223d15..0ac5c589ddce 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -2056,33 +2056,45 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { .config = rt2500pci_config, }; -static const struct data_queue_desc rt2500pci_queue_rx = { - .entry_num = 32, - .data_size = DATA_FRAME_SIZE, - .desc_size = RXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; +static void rt2500pci_queue_init(struct data_queue *queue) +{ + switch (queue->qid) { + case QID_RX: + queue->limit = 32; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = RXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; -static const struct data_queue_desc rt2500pci_queue_tx = { - .entry_num = 32, - .data_size = DATA_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; + case QID_AC_VO: + case QID_AC_VI: + case QID_AC_BE: + case QID_AC_BK: + queue->limit = 32; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; -static const struct data_queue_desc rt2500pci_queue_bcn = { - .entry_num = 1, - .data_size = MGMT_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; + case QID_BEACON: + queue->limit = 1; + queue->data_size = MGMT_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; -static const struct data_queue_desc rt2500pci_queue_atim = { - .entry_num = 8, - .data_size = DATA_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; + case QID_ATIM: + queue->limit = 8; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; + + default: + BUG(); + break; + } +} static const struct rt2x00_ops rt2500pci_ops = { .name = KBUILD_MODNAME, @@ -2090,11 +2102,7 @@ static const struct rt2x00_ops rt2500pci_ops = { .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, - .extra_tx_headroom = 0, - .rx = &rt2500pci_queue_rx, - .tx = &rt2500pci_queue_tx, - .bcn = &rt2500pci_queue_bcn, - .atim = &rt2500pci_queue_atim, + .queue_init = rt2500pci_queue_init, .lib = &rt2500pci_rt2x00_ops, .hw = &rt2500pci_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index a7f7b365eff4..85acc79f68b8 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -1867,33 +1867,45 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { .config = rt2500usb_config, }; -static const struct data_queue_desc rt2500usb_queue_rx = { - .entry_num = 32, - .data_size = DATA_FRAME_SIZE, - .desc_size = RXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; +static void rt2500usb_queue_init(struct data_queue *queue) +{ + switch (queue->qid) { + case QID_RX: + queue->limit = 32; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = RXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_usb); + break; -static const struct data_queue_desc rt2500usb_queue_tx = { - .entry_num = 32, - .data_size = DATA_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; + case QID_AC_VO: + case QID_AC_VI: + case QID_AC_BE: + case QID_AC_BK: + 
queue->limit = 32; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_usb); + break; -static const struct data_queue_desc rt2500usb_queue_bcn = { - .entry_num = 1, - .data_size = MGMT_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_usb_bcn), -}; + case QID_BEACON: + queue->limit = 1; + queue->data_size = MGMT_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_usb_bcn); + break; -static const struct data_queue_desc rt2500usb_queue_atim = { - .entry_num = 8, - .data_size = DATA_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; + case QID_ATIM: + queue->limit = 8; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_usb); + break; + + default: + BUG(); + break; + } +} static const struct rt2x00_ops rt2500usb_ops = { .name = KBUILD_MODNAME, @@ -1901,11 +1913,7 @@ static const struct rt2x00_ops rt2500usb_ops = { .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, - .extra_tx_headroom = TXD_DESC_SIZE, - .rx = &rt2500usb_queue_rx, - .tx = &rt2500usb_queue_tx, - .bcn = &rt2500usb_queue_bcn, - .atim = &rt2500usb_queue_atim, + .queue_init = rt2500usb_queue_init, .lib = &rt2500usb_rt2x00_ops, .hw = &rt2500usb_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 72f32e5caa4d..3aa30ddcbfea 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -840,7 +840,7 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev, unsigned int beacon_base) { int i; - const int txwi_desc_size = rt2x00dev->ops->bcn->winfo_size; + const int txwi_desc_size = rt2x00dev->bcn->winfo_size; /* * For the Beacon base registers we only need to clear @@ -3960,379 +3960,577 @@ static void rt2800_init_bbp_early(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 106, 0x35); } -static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) +static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev) { - int ant, div_mode; u16 eeprom; u8 value; - rt2800_init_bbp_early(rt2x00dev); + rt2800_bbp_read(rt2x00dev, 138, &value); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) + value |= 0x20; + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) + value &= ~0x02; + rt2800_bbp_write(rt2x00dev, 138, value); +} - rt2800_bbp_read(rt2x00dev, 105, &value); - rt2x00_set_field8(&value, BBP105_MLD, - rt2x00dev->default_ant.rx_chain_num == 2); - rt2800_bbp_write(rt2x00dev, 105, value); +static void rt2800_init_bbp_305x_soc(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 31, 0x08); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 78, 0x0e); + rt2800_bbp_write(rt2x00dev, 80, 0x08); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + 
rt2800_bbp_write(rt2x00dev, 105, 0x01); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); +} + +static void rt2800_init_bbp_28xx(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { + rt2800_bbp_write(rt2x00dev, 69, 0x16); + rt2800_bbp_write(rt2x00dev, 73, 0x12); + } else { + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + } + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 81, 0x37); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) + rt2800_bbp_write(rt2x00dev, 84, 0x19); + else + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); +} + +static void rt2800_init_bbp_30xx(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || + rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || + rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E)) + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + else + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + if (rt2x00_rt(rt2x00dev, RT3071) || + rt2x00_rt(rt2x00dev, RT3090)) + rt2800_disable_unused_dac_adc(rt2x00dev); +} + +static void rt2800_init_bbp_3290(struct rt2x00_dev *rt2x00dev) +{ + u8 value; rt2800_bbp4_mac_if_ctrl(rt2x00dev); - rt2800_bbp_write(rt2x00dev, 20, 0x06); rt2800_bbp_write(rt2x00dev, 31, 0x08); - rt2800_bbp_write(rt2x00dev, 65, 0x2C); - rt2800_bbp_write(rt2x00dev, 68, 0xDD); - rt2800_bbp_write(rt2x00dev, 69, 0x1A); - rt2800_bbp_write(rt2x00dev, 70, 0x05); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 68, 0x0b); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x13); - rt2800_bbp_write(rt2x00dev, 74, 0x0F); - rt2800_bbp_write(rt2x00dev, 75, 0x4F); + rt2800_bbp_write(rt2x00dev, 75, 0x46); rt2800_bbp_write(rt2x00dev, 76, 0x28); + + rt2800_bbp_write(rt2x00dev, 77, 0x58); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 74, 0x0b); + rt2800_bbp_write(rt2x00dev, 79, 0x18); + rt2800_bbp_write(rt2x00dev, 80, 0x09); + rt2800_bbp_write(rt2x00dev, 81, 0x33); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x7a); + + rt2800_bbp_write(rt2x00dev, 84, 0x9a); + + rt2800_bbp_write(rt2x00dev, 86, 0x38); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x02); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 104, 0x92); + + rt2800_bbp_write(rt2x00dev, 105, 
0x1c); + + rt2800_bbp_write(rt2x00dev, 106, 0x03); + + rt2800_bbp_write(rt2x00dev, 128, 0x12); + + rt2800_bbp_write(rt2x00dev, 67, 0x24); + rt2800_bbp_write(rt2x00dev, 143, 0x04); + rt2800_bbp_write(rt2x00dev, 142, 0x99); + rt2800_bbp_write(rt2x00dev, 150, 0x30); + rt2800_bbp_write(rt2x00dev, 151, 0x2e); + rt2800_bbp_write(rt2x00dev, 152, 0x20); + rt2800_bbp_write(rt2x00dev, 153, 0x34); + rt2800_bbp_write(rt2x00dev, 154, 0x40); + rt2800_bbp_write(rt2x00dev, 155, 0x3b); + rt2800_bbp_write(rt2x00dev, 253, 0x04); + + rt2800_bbp_read(rt2x00dev, 47, &value); + rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1); + rt2800_bbp_write(rt2x00dev, 47, value); + + /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */ + rt2800_bbp_read(rt2x00dev, 3, &value); + rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1); + rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1); + rt2800_bbp_write(rt2x00dev, 3, value); +} + +static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 3, 0x00); + rt2800_bbp_write(rt2x00dev, 4, 0x50); + + rt2800_bbp_write(rt2x00dev, 31, 0x08); + + rt2800_bbp_write(rt2x00dev, 47, 0x48); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 68, 0x0b); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 77, 0x59); - rt2800_bbp_write(rt2x00dev, 84, 0x9A); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 78, 0x0e); + rt2800_bbp_write(rt2x00dev, 80, 0x08); + rt2800_bbp_write(rt2x00dev, 81, 0x37); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + rt2800_bbp_write(rt2x00dev, 88, 0x90); + rt2800_bbp_write(rt2x00dev, 91, 0x04); + rt2800_bbp_write(rt2x00dev, 92, 0x02); - rt2800_bbp_write(rt2x00dev, 95, 0x9a); - rt2800_bbp_write(rt2x00dev, 98, 0x12); - rt2800_bbp_write(rt2x00dev, 103, 0xC0); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + rt2800_bbp_write(rt2x00dev, 104, 0x92); - /* FIXME BBP105 owerwrite */ - rt2800_bbp_write(rt2x00dev, 105, 0x3C); - rt2800_bbp_write(rt2x00dev, 106, 0x35); - rt2800_bbp_write(rt2x00dev, 128, 0x12); - rt2800_bbp_write(rt2x00dev, 134, 0xD0); - rt2800_bbp_write(rt2x00dev, 135, 0xF6); - rt2800_bbp_write(rt2x00dev, 137, 0x0F); - /* Initialize GLRT (Generalized Likehood Radio Test) */ - rt2800_init_bbp_5592_glrt(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 105, 0x34); + + rt2800_bbp_write(rt2x00dev, 106, 0x05); + + rt2800_bbp_write(rt2x00dev, 120, 0x50); + + rt2800_bbp_write(rt2x00dev, 137, 0x0f); + + rt2800_bbp_write(rt2x00dev, 163, 0xbd); + /* Set ITxBF timeout to 0x9c40=1000msec */ + rt2800_bbp_write(rt2x00dev, 179, 0x02); + rt2800_bbp_write(rt2x00dev, 180, 0x00); + rt2800_bbp_write(rt2x00dev, 182, 0x40); + rt2800_bbp_write(rt2x00dev, 180, 0x01); + rt2800_bbp_write(rt2x00dev, 182, 0x9c); + rt2800_bbp_write(rt2x00dev, 179, 0x00); + /* Reprogram the inband interface to put right values in RXWI */ + rt2800_bbp_write(rt2x00dev, 142, 0x04); + rt2800_bbp_write(rt2x00dev, 143, 0x3b); + rt2800_bbp_write(rt2x00dev, 142, 0x06); + rt2800_bbp_write(rt2x00dev, 143, 0xa0); + rt2800_bbp_write(rt2x00dev, 142, 0x07); + rt2800_bbp_write(rt2x00dev, 143, 0xa1); + rt2800_bbp_write(rt2x00dev, 142, 0x08); + rt2800_bbp_write(rt2x00dev, 143, 0xa2); + + rt2800_bbp_write(rt2x00dev, 148, 0xc8); +} - 
rt2800_bbp4_mac_if_ctrl(rt2x00dev); +static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); - div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); - ant = (div_mode == 3) ? 1 : 0; - rt2800_bbp_read(rt2x00dev, 152, &value); - if (ant == 0) { - /* Main antenna */ - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); - } else { - /* Auxiliary antenna */ - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); - } - rt2800_bbp_write(rt2x00dev, 152, value); + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { - rt2800_bbp_read(rt2x00dev, 254, &value); - rt2x00_set_field8(&value, BBP254_BIT7, 1); - rt2800_bbp_write(rt2x00dev, 254, value); - } + rt2800_bbp_write(rt2x00dev, 70, 0x0a); - rt2800_init_freq_calibration(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); - rt2800_bbp_write(rt2x00dev, 84, 0x19); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + if (rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); + else + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + rt2800_disable_unused_dac_adc(rt2x00dev); } -static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) +static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev) { - unsigned int i; - u16 eeprom; - u8 reg_id; - u8 value; + rt2800_bbp_write(rt2x00dev, 31, 0x08); - if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || - rt2800_wait_bbp_ready(rt2x00dev))) - return -EACCES; + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); - if (rt2x00_rt(rt2x00dev, RT5592)) { - rt2800_init_bbp_5592(rt2x00dev); - return 0; - } + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); - if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 3, 0x00); - rt2800_bbp_write(rt2x00dev, 4, 0x50); - } + rt2800_bbp_write(rt2x00dev, 70, 0x0a); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp4_mac_if_ctrl(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); - if (rt2800_is_305x_soc(rt2x00dev) || - rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 31, 0x08); + rt2800_bbp_write(rt2x00dev, 82, 0x62); - if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 47, 0x48); + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + rt2800_disable_unused_dac_adc(rt2x00dev); +} + +static void 
rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) +{ + int ant, div_mode; + u16 eeprom; + u8 value; + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2800_bbp_write(rt2x00dev, 31, 0x08); rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 68, 0x0b); + rt2800_bbp_write(rt2x00dev, 68, 0x0b); - if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { - rt2800_bbp_write(rt2x00dev, 69, 0x16); - rt2800_bbp_write(rt2x00dev, 73, 0x12); - } else if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_write(rt2x00dev, 69, 0x12); - rt2800_bbp_write(rt2x00dev, 73, 0x13); - rt2800_bbp_write(rt2x00dev, 75, 0x46); - rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + rt2800_bbp_write(rt2x00dev, 76, 0x28); - if (rt2x00_rt(rt2x00dev, RT3290)) - rt2800_bbp_write(rt2x00dev, 77, 0x58); - else - rt2800_bbp_write(rt2x00dev, 77, 0x59); - } else { - rt2800_bbp_write(rt2x00dev, 69, 0x12); - rt2800_bbp_write(rt2x00dev, 73, 0x10); - } + rt2800_bbp_write(rt2x00dev, 77, 0x59); rt2800_bbp_write(rt2x00dev, 70, 0x0a); - if (rt2x00_rt(rt2x00dev, RT3070) || - rt2x00_rt(rt2x00dev, RT3071) || - rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_write(rt2x00dev, 79, 0x13); - rt2800_bbp_write(rt2x00dev, 80, 0x05); - rt2800_bbp_write(rt2x00dev, 81, 0x33); - } else if (rt2800_is_305x_soc(rt2x00dev)) { - rt2800_bbp_write(rt2x00dev, 78, 0x0e); - rt2800_bbp_write(rt2x00dev, 80, 0x08); - } else if (rt2x00_rt(rt2x00dev, RT3290)) { - rt2800_bbp_write(rt2x00dev, 74, 0x0b); - rt2800_bbp_write(rt2x00dev, 79, 0x18); - rt2800_bbp_write(rt2x00dev, 80, 0x09); - rt2800_bbp_write(rt2x00dev, 81, 0x33); - } else if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 78, 0x0e); - rt2800_bbp_write(rt2x00dev, 80, 0x08); - rt2800_bbp_write(rt2x00dev, 81, 0x37); - } else { - rt2800_bbp_write(rt2x00dev, 81, 0x37); - } + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); rt2800_bbp_write(rt2x00dev, 82, 0x62); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 83, 0x7a); - else - rt2800_bbp_write(rt2x00dev, 83, 0x6a); - if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) - rt2800_bbp_write(rt2x00dev, 84, 0x19); - else if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 84, 0x9a); - else - rt2800_bbp_write(rt2x00dev, 84, 0x99); + rt2800_bbp_write(rt2x00dev, 83, 0x7a); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 86, 0x38); - else - rt2800_bbp_write(rt2x00dev, 86, 0x00); + rt2800_bbp_write(rt2x00dev, 84, 0x9a); - if (rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5392)) + rt2800_bbp_write(rt2x00dev, 86, 0x38); + + if (rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 88, 0x90); rt2800_bbp_write(rt2x00dev, 91, 0x04); - if (rt2x00_rt(rt2x00dev, RT3290) || - 
rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 92, 0x02); - else - rt2800_bbp_write(rt2x00dev, 92, 0x00); + rt2800_bbp_write(rt2x00dev, 92, 0x02); if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 95, 0x9a); rt2800_bbp_write(rt2x00dev, 98, 0x12); } - if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || - rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || - rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || - rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || - rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392) || - rt2800_is_305x_soc(rt2x00dev)) - rt2800_bbp_write(rt2x00dev, 103, 0xc0); - else - rt2800_bbp_write(rt2x00dev, 103, 0x00); + rt2800_bbp_write(rt2x00dev, 103, 0xc0); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 104, 0x92); + rt2800_bbp_write(rt2x00dev, 104, 0x92); - if (rt2800_is_305x_soc(rt2x00dev)) - rt2800_bbp_write(rt2x00dev, 105, 0x01); - else if (rt2x00_rt(rt2x00dev, RT3290)) - rt2800_bbp_write(rt2x00dev, 105, 0x1c); - else if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 105, 0x34); - else if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 105, 0x3c); - else - rt2800_bbp_write(rt2x00dev, 105, 0x05); + rt2800_bbp_write(rt2x00dev, 105, 0x3c); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390)) + if (rt2x00_rt(rt2x00dev, RT5390)) rt2800_bbp_write(rt2x00dev, 106, 0x03); - else if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 106, 0x05); else if (rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 106, 0x12); else - rt2800_bbp_write(rt2x00dev, 106, 0x35); - - if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 120, 0x50); + WARN_ON(1); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 128, 0x12); + rt2800_bbp_write(rt2x00dev, 128, 0x12); if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 134, 0xd0); rt2800_bbp_write(rt2x00dev, 135, 0xf6); } - if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 137, 0x0f); + rt2800_disable_unused_dac_adc(rt2x00dev); - if (rt2x00_rt(rt2x00dev, RT3071) || - rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_read(rt2x00dev, 138, &value); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + div_mode = rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_ANT_DIVERSITY); + ant = (div_mode == 3) ? 
1 : 0; - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) - value |= 0x20; - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) - value &= ~0x02; + /* check if this is a Bluetooth combo card */ + if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { + u32 reg; - rt2800_bbp_write(rt2x00dev, 138, value); + rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg); + rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0); + rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0); + rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0); + rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0); + if (ant == 0) + rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1); + else if (ant == 1) + rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1); + rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); } - if (rt2x00_rt(rt2x00dev, RT3290)) { - rt2800_bbp_write(rt2x00dev, 67, 0x24); - rt2800_bbp_write(rt2x00dev, 143, 0x04); - rt2800_bbp_write(rt2x00dev, 142, 0x99); - rt2800_bbp_write(rt2x00dev, 150, 0x30); - rt2800_bbp_write(rt2x00dev, 151, 0x2e); - rt2800_bbp_write(rt2x00dev, 152, 0x20); - rt2800_bbp_write(rt2x00dev, 153, 0x34); - rt2800_bbp_write(rt2x00dev, 154, 0x40); - rt2800_bbp_write(rt2x00dev, 155, 0x3b); - rt2800_bbp_write(rt2x00dev, 253, 0x04); - - rt2800_bbp_read(rt2x00dev, 47, &value); - rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1); - rt2800_bbp_write(rt2x00dev, 47, value); - - /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */ - rt2800_bbp_read(rt2x00dev, 3, &value); - rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1); - rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1); - rt2800_bbp_write(rt2x00dev, 3, value); + /* This chip has hardware antenna diversity*/ + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { + rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ + rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ + rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ } - if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 163, 0xbd); - /* Set ITxBF timeout to 0x9c40=1000msec */ - rt2800_bbp_write(rt2x00dev, 179, 0x02); - rt2800_bbp_write(rt2x00dev, 180, 0x00); - rt2800_bbp_write(rt2x00dev, 182, 0x40); - rt2800_bbp_write(rt2x00dev, 180, 0x01); - rt2800_bbp_write(rt2x00dev, 182, 0x9c); - rt2800_bbp_write(rt2x00dev, 179, 0x00); - /* Reprogram the inband interface to put right values in RXWI */ - rt2800_bbp_write(rt2x00dev, 142, 0x04); - rt2800_bbp_write(rt2x00dev, 143, 0x3b); - rt2800_bbp_write(rt2x00dev, 142, 0x06); - rt2800_bbp_write(rt2x00dev, 143, 0xa0); - rt2800_bbp_write(rt2x00dev, 142, 0x07); - rt2800_bbp_write(rt2x00dev, 143, 0xa1); - rt2800_bbp_write(rt2x00dev, 142, 0x08); - rt2800_bbp_write(rt2x00dev, 143, 0xa2); - - rt2800_bbp_write(rt2x00dev, 148, 0xc8); + rt2800_bbp_read(rt2x00dev, 152, &value); + if (ant == 0) + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); + else + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); + rt2800_bbp_write(rt2x00dev, 152, value); + + rt2800_init_freq_calibration(rt2x00dev); +} + +static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) +{ + int ant, div_mode; + u16 eeprom; + u8 value; + + rt2800_init_bbp_early(rt2x00dev); + + rt2800_bbp_read(rt2x00dev, 105, &value); + rt2x00_set_field8(&value, BBP105_MLD, + rt2x00dev->default_ant.rx_chain_num == 2); + rt2800_bbp_write(rt2x00dev, 105, value); + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2800_bbp_write(rt2x00dev, 20, 0x06); + rt2800_bbp_write(rt2x00dev, 31, 0x08); + rt2800_bbp_write(rt2x00dev, 
65, 0x2C); + rt2800_bbp_write(rt2x00dev, 68, 0xDD); + rt2800_bbp_write(rt2x00dev, 69, 0x1A); + rt2800_bbp_write(rt2x00dev, 70, 0x05); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 74, 0x0F); + rt2800_bbp_write(rt2x00dev, 75, 0x4F); + rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 77, 0x59); + rt2800_bbp_write(rt2x00dev, 84, 0x9A); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + rt2800_bbp_write(rt2x00dev, 88, 0x90); + rt2800_bbp_write(rt2x00dev, 91, 0x04); + rt2800_bbp_write(rt2x00dev, 92, 0x02); + rt2800_bbp_write(rt2x00dev, 95, 0x9a); + rt2800_bbp_write(rt2x00dev, 98, 0x12); + rt2800_bbp_write(rt2x00dev, 103, 0xC0); + rt2800_bbp_write(rt2x00dev, 104, 0x92); + /* FIXME BBP105 overwrite */ + rt2800_bbp_write(rt2x00dev, 105, 0x3C); + rt2800_bbp_write(rt2x00dev, 106, 0x35); + rt2800_bbp_write(rt2x00dev, 128, 0x12); + rt2800_bbp_write(rt2x00dev, 134, 0xD0); + rt2800_bbp_write(rt2x00dev, 135, 0xF6); + rt2800_bbp_write(rt2x00dev, 137, 0x0F); + + /* Initialize GLRT (Generalized Likelihood Radio Test) */ + rt2800_init_bbp_5592_glrt(rt2x00dev); + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); + ant = (div_mode == 3) ? 1 : 0; + rt2800_bbp_read(rt2x00dev, 152, &value); + if (ant == 0) { + /* Main antenna */ + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); + } else { + /* Auxiliary antenna */ + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); } + rt2800_bbp_write(rt2x00dev, 152, value); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { - rt2800_bbp_read(rt2x00dev, 254, &value); - rt2x00_set_field8(&value, BBP254_BIT7, 1); - rt2800_bbp_write(rt2x00dev, 254, value); - } + if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { + rt2800_bbp_read(rt2x00dev, 254, &value); + rt2x00_set_field8(&value, BBP254_BIT7, 1); + rt2800_bbp_write(rt2x00dev, 254, value); + } - rt2800_init_freq_calibration(rt2x00dev); + rt2800_init_freq_calibration(rt2x00dev); - rt2800_bbp_write(rt2x00dev, 84, 0x19); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) + rt2800_bbp_write(rt2x00dev, 84, 0x19); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); +} -static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) +static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) { - unsigned int i; - u16 eeprom; - u8 reg_id; - u8 value; + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; - if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || - rt2800_wait_bbp_ready(rt2x00dev))) - return -EACCES; - if (rt2x00_rt(rt2x00dev, RT5592)) { - rt2800_init_bbp_5592(rt2x00dev); - return 0; - } - if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 3, 0x00); - rt2800_bbp_write(rt2x00dev, 4, 0x50); - } - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp4_mac_if_ctrl(rt2x00dev); - if (rt2800_is_305x_soc(rt2x00dev) || - rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 31, 0x08); - if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 47, 0x48); - rt2800_bbp_write(rt2x00dev, 65, 0x2c); - rt2800_bbp_write(rt2x00dev, 66, 0x38); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 68, 0x0b); - if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { - rt2800_bbp_write(rt2x00dev, 69, 0x16); - rt2800_bbp_write(rt2x00dev, 73, 0x12); - } else if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_write(rt2x00dev, 69, 0x12); - rt2800_bbp_write(rt2x00dev, 73, 0x13); - rt2800_bbp_write(rt2x00dev, 75, 0x46); - rt2800_bbp_write(rt2x00dev, 76, 0x28); - if (rt2x00_rt(rt2x00dev, RT3290)) - rt2800_bbp_write(rt2x00dev, 77, 0x58); - else - rt2800_bbp_write(rt2x00dev, 77, 0x59); - } else { - rt2800_bbp_write(rt2x00dev, 69, 0x12); - rt2800_bbp_write(rt2x00dev, 73, 0x10); - } - rt2800_bbp_write(rt2x00dev, 70, 0x0a); - if (rt2x00_rt(rt2x00dev, RT3070) || - rt2x00_rt(rt2x00dev, RT3071) || - rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_write(rt2x00dev, 79, 0x13); - rt2800_bbp_write(rt2x00dev, 80, 0x05); - rt2800_bbp_write(rt2x00dev, 81, 0x33); - } else if (rt2800_is_305x_soc(rt2x00dev)) { - rt2800_bbp_write(rt2x00dev, 78, 0x0e); - rt2800_bbp_write(rt2x00dev, 80, 0x08); - } else if (rt2x00_rt(rt2x00dev, RT3290)) { - rt2800_bbp_write(rt2x00dev, 74, 0x0b); - rt2800_bbp_write(rt2x00dev, 79, 0x18); - rt2800_bbp_write(rt2x00dev, 80, 0x09); - rt2800_bbp_write(rt2x00dev, 81, 0x33); - } else if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 78, 0x0e); - rt2800_bbp_write(rt2x00dev, 80, 0x08); - rt2800_bbp_write(rt2x00dev, 81, 0x37); - } else { - rt2800_bbp_write(rt2x00dev, 81, 0x37); - } - rt2800_bbp_write(rt2x00dev, 82, 0x62); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 83, 0x7a); - else - rt2800_bbp_write(rt2x00dev, 83, 0x6a); - if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) - rt2800_bbp_write(rt2x00dev, 84, 0x19); - else if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 84, 0x9a); - else - rt2800_bbp_write(rt2x00dev, 84, 0x99); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 86, 0x38); - else - rt2800_bbp_write(rt2x00dev, 86, 0x00); - if (rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 88, 0x90); rt2800_bbp_write(rt2x00dev, 91, 0x04); - if (rt2x00_rt(rt2x00dev, RT3290) || 
1 : 0; - /* check if this is a Bluetooth combo card */ - if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { - u32 reg; - - rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg); - rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0); - if (ant == 0) - rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1); - else if (ant == 1) - rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1); - rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); - } - /* This chip has hardware antenna diversity*/ - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { - rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ - rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ - rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ - } - rt2800_bbp_read(rt2x00dev, 152, &value); - if (ant == 0) - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); - else - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); - rt2800_bbp_write(rt2x00dev, 152, value); + if (rt2800_is_305x_soc(rt2x00dev)) + rt2800_init_bbp_305x_soc(rt2x00dev); - rt2800_init_freq_calibration(rt2x00dev); + switch (rt2x00dev->chip.rt) { + case RT2860: + case RT2872: + case RT2883: + rt2800_init_bbp_28xx(rt2x00dev); + break; + case RT3070: + case RT3071: + case RT3090: + rt2800_init_bbp_30xx(rt2x00dev); + break; + case RT3290: + rt2800_init_bbp_3290(rt2x00dev); + break; + case RT3352: + rt2800_init_bbp_3352(rt2x00dev); + break; + case RT3390: + rt2800_init_bbp_3390(rt2x00dev); + break; + case RT3572: + rt2800_init_bbp_3572(rt2x00dev); + break; + case RT5390: + case RT5392: + rt2800_init_bbp_53xx(rt2x00dev); + break; + case RT5592: + rt2800_init_bbp_5592(rt2x00dev); + return; } for (i = 0; i < EEPROM_BBP_SIZE; i++) { @@ -4344,8 +4542,6 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, reg_id, value); } } - - return 0; } static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev) @@ -5196,9 +5392,11 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev) } msleep(1); - if (unlikely(rt2800_init_bbp(rt2x00dev))) + if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || + rt2800_wait_bbp_ready(rt2x00dev))) return -EIO; + rt2800_init_bbp(rt2x00dev); rt2800_init_rfcsr(rt2x00dev); if (rt2x00_is_usb(rt2x00dev) && diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 6f4a861af336..7c7478219bbc 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -1014,7 +1014,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev) * Since we have only one producer and one consumer we don't * need to lock the kfifo.
*/ - for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) { + for (i = 0; i < rt2x00dev->tx->limit; i++) { rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status); if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID)) @@ -1186,29 +1186,43 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { .sta_remove = rt2800_sta_remove, }; -static const struct data_queue_desc rt2800pci_queue_rx = { - .entry_num = 128, - .data_size = AGGREGATION_SIZE, - .desc_size = RXD_DESC_SIZE, - .winfo_size = RXWI_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; +static void rt2800pci_queue_init(struct data_queue *queue) +{ + switch (queue->qid) { + case QID_RX: + queue->limit = 128; + queue->data_size = AGGREGATION_SIZE; + queue->desc_size = RXD_DESC_SIZE; + queue->winfo_size = RXWI_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; -static const struct data_queue_desc rt2800pci_queue_tx = { - .entry_num = 64, - .data_size = AGGREGATION_SIZE, - .desc_size = TXD_DESC_SIZE, - .winfo_size = TXWI_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; + case QID_AC_VO: + case QID_AC_VI: + case QID_AC_BE: + case QID_AC_BK: + queue->limit = 64; + queue->data_size = AGGREGATION_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->winfo_size = TXWI_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; -static const struct data_queue_desc rt2800pci_queue_bcn = { - .entry_num = 8, - .data_size = 0, /* No DMA required for beacons */ - .desc_size = TXD_DESC_SIZE, - .winfo_size = TXWI_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; + case QID_BEACON: + queue->limit = 8; + queue->data_size = 0; /* No DMA required for beacons */ + queue->desc_size = TXD_DESC_SIZE; + queue->winfo_size = TXWI_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; + + case QID_ATIM: + /* fallthrough */ + default: + BUG(); + break; + } +} static const struct rt2x00_ops rt2800pci_ops = { .name = KBUILD_MODNAME, @@ -1217,10 +1231,7 @@ static const struct rt2x00_ops rt2800pci_ops = { .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, - .extra_tx_headroom = TXWI_DESC_SIZE, - .rx = &rt2800pci_queue_rx, - .tx = &rt2800pci_queue_tx, - .bcn = &rt2800pci_queue_bcn, + .queue_init = rt2800pci_queue_init, .lib = &rt2800pci_rt2x00_ops, .drv = &rt2800pci_rt2800_ops, .hw = &rt2800pci_mac80211_ops, diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index ac854d75bd6c..7edd903dd749 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -327,7 +327,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) * this limit so reduce the number to prevent errors. 
*/ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT, - ((rt2x00dev->ops->rx->entry_num * DATA_FRAME_SIZE) + ((rt2x00dev->rx->limit * DATA_FRAME_SIZE) / 1024) - 3); rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1); rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); @@ -849,85 +849,63 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .sta_remove = rt2800_sta_remove, }; -static const struct data_queue_desc rt2800usb_queue_rx = { - .entry_num = 128, - .data_size = AGGREGATION_SIZE, - .desc_size = RXINFO_DESC_SIZE, - .winfo_size = RXWI_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; - -static const struct data_queue_desc rt2800usb_queue_tx = { - .entry_num = 16, - .data_size = AGGREGATION_SIZE, - .desc_size = TXINFO_DESC_SIZE, - .winfo_size = TXWI_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; - -static const struct data_queue_desc rt2800usb_queue_bcn = { - .entry_num = 8, - .data_size = MGMT_FRAME_SIZE, - .desc_size = TXINFO_DESC_SIZE, - .winfo_size = TXWI_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; +static void rt2800usb_queue_init(struct data_queue *queue) +{ + struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; + unsigned short txwi_size, rxwi_size; -static const struct rt2x00_ops rt2800usb_ops = { - .name = KBUILD_MODNAME, - .drv_data_size = sizeof(struct rt2800_drv_data), - .max_ap_intf = 8, - .eeprom_size = EEPROM_SIZE, - .rf_size = RF_SIZE, - .tx_queues = NUM_TX_QUEUES, - .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, - .rx = &rt2800usb_queue_rx, - .tx = &rt2800usb_queue_tx, - .bcn = &rt2800usb_queue_bcn, - .lib = &rt2800usb_rt2x00_ops, - .drv = &rt2800usb_rt2800_ops, - .hw = &rt2800usb_mac80211_ops, -#ifdef CONFIG_RT2X00_LIB_DEBUGFS - .debugfs = &rt2800_rt2x00debug, -#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ -}; + if (rt2x00_rt(rt2x00dev, RT5592)) { + txwi_size = TXWI_DESC_SIZE_5592; + rxwi_size = RXWI_DESC_SIZE_5592; + } else { + txwi_size = TXWI_DESC_SIZE; + rxwi_size = RXWI_DESC_SIZE; + } -static const struct data_queue_desc rt2800usb_queue_rx_5592 = { - .entry_num = 128, - .data_size = AGGREGATION_SIZE, - .desc_size = RXINFO_DESC_SIZE, - .winfo_size = RXWI_DESC_SIZE_5592, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; + switch (queue->qid) { + case QID_RX: + queue->limit = 128; + queue->data_size = AGGREGATION_SIZE; + queue->desc_size = RXINFO_DESC_SIZE; + queue->winfo_size = rxwi_size; + queue->priv_size = sizeof(struct queue_entry_priv_usb); + break; -static const struct data_queue_desc rt2800usb_queue_tx_5592 = { - .entry_num = 16, - .data_size = AGGREGATION_SIZE, - .desc_size = TXINFO_DESC_SIZE, - .winfo_size = TXWI_DESC_SIZE_5592, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; + case QID_AC_VO: + case QID_AC_VI: + case QID_AC_BE: + case QID_AC_BK: + queue->limit = 16; + queue->data_size = AGGREGATION_SIZE; + queue->desc_size = TXINFO_DESC_SIZE; + queue->winfo_size = txwi_size; + queue->priv_size = sizeof(struct queue_entry_priv_usb); + break; -static const struct data_queue_desc rt2800usb_queue_bcn_5592 = { - .entry_num = 8, - .data_size = MGMT_FRAME_SIZE, - .desc_size = TXINFO_DESC_SIZE, - .winfo_size = TXWI_DESC_SIZE_5592, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; + case QID_BEACON: + queue->limit = 8; + queue->data_size = MGMT_FRAME_SIZE; + queue->desc_size = TXINFO_DESC_SIZE; + queue->winfo_size = txwi_size; + queue->priv_size = sizeof(struct queue_entry_priv_usb); + break; + case QID_ATIM: + /* fallthrough */ + default: + BUG(); + break; + } +} -static const 
struct rt2x00_ops rt2800usb_ops_5592 = { +static const struct rt2x00_ops rt2800usb_ops = { .name = KBUILD_MODNAME, .drv_data_size = sizeof(struct rt2800_drv_data), .max_ap_intf = 8, .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, - .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592, - .rx = &rt2800usb_queue_rx_5592, - .tx = &rt2800usb_queue_tx_5592, - .bcn = &rt2800usb_queue_bcn_5592, + .queue_init = rt2800usb_queue_init, .lib = &rt2800usb_rt2x00_ops, .drv = &rt2800usb_rt2800_ops, .hw = &rt2800usb_mac80211_ops, @@ -1248,15 +1226,15 @@ static struct usb_device_id rt2800usb_device_table[] = { #endif #ifdef CONFIG_RT2800USB_RT55XX /* Arcadyan */ - { USB_DEVICE(0x043e, 0x7a32), .driver_info = 5592 }, + { USB_DEVICE(0x043e, 0x7a32) }, /* AVM GmbH */ - { USB_DEVICE(0x057c, 0x8501), .driver_info = 5592 }, + { USB_DEVICE(0x057c, 0x8501) }, /* D-Link DWA-160-B2 */ - { USB_DEVICE(0x2001, 0x3c1a), .driver_info = 5592 }, + { USB_DEVICE(0x2001, 0x3c1a) }, /* Proware */ - { USB_DEVICE(0x043e, 0x7a13), .driver_info = 5592 }, + { USB_DEVICE(0x043e, 0x7a13) }, /* Ralink */ - { USB_DEVICE(0x148f, 0x5572), .driver_info = 5592 }, + { USB_DEVICE(0x148f, 0x5572) }, #endif #ifdef CONFIG_RT2800USB_UNKNOWN /* @@ -1361,9 +1339,6 @@ MODULE_LICENSE("GPL"); static int rt2800usb_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { - if (id->driver_info == 5592) - return rt2x00usb_probe(usb_intf, &rt2800usb_ops_5592); - return rt2x00usb_probe(usb_intf, &rt2800usb_ops); } diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 7510723a8c37..ee3fc570b11d 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -648,11 +648,7 @@ struct rt2x00_ops { const unsigned int eeprom_size; const unsigned int rf_size; const unsigned int tx_queues; - const unsigned int extra_tx_headroom; - const struct data_queue_desc *rx; - const struct data_queue_desc *tx; - const struct data_queue_desc *bcn; - const struct data_queue_desc *atim; + void (*queue_init)(struct data_queue *queue); const struct rt2x00lib_ops *lib; const void *drv; const struct ieee80211_ops *hw; @@ -1010,6 +1006,9 @@ struct rt2x00_dev { */ struct list_head bar_list; spinlock_t bar_list_lock; + + /* Extra TX headroom required for alignment purposes. */ + unsigned int extra_tx_headroom; }; struct rt2x00_bar_list_entry { diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 90dc14336980..f03e3bba51c3 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -334,7 +334,7 @@ void rt2x00lib_txdone(struct queue_entry *entry, /* * Remove the extra tx headroom from the skb. */ - skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom); + skb_pull(entry->skb, rt2x00dev->extra_tx_headroom); /* * Signal that the TX descriptor is no longer in the skb. @@ -1049,7 +1049,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) */ rt2x00dev->hw->extra_tx_headroom = max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM, - rt2x00dev->ops->extra_tx_headroom); + rt2x00dev->extra_tx_headroom); /* * Take TX headroom required for alignment into account. 
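The shape of this refactoring is easiest to see in one place: instead of copying fixed values out of per-driver data_queue_desc tables, rt2x00lib now calls back into the driver to size each freshly allocated queue, and derives the flush threshold itself. A minimal sketch of the new flow, pieced together from the rt2x00queue.c hunks further below (the wrapper name and the elided allocation/error handling are illustrative, not part of the driver):

static void rt2x00queue_setup_sketch(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue,
				     enum data_queue_qid qid)
{
	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;

	/* The driver fills in limit, data_size, desc_size, winfo_size and
	 * priv_size for this qid; rt2800usb can now pick the RT5592 TXWI/RXWI
	 * sizes at run time instead of keeping a second set of static tables.
	 */
	rt2x00dev->ops->queue_init(queue);

	/* Derived by the library; this used to come from qdesc->entry_num. */
	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}

This callback is also what makes the separate rt2800usb_ops_5592 structure and the .driver_info = 5592 device-table matching above unnecessary: the one remaining ops structure sizes the RT5592 queues at run time.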
@@ -1077,7 +1077,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) */ int kfifo_size = roundup_pow_of_two(rt2x00dev->ops->tx_queues * - rt2x00dev->ops->tx->entry_num * + rt2x00dev->tx->limit * sizeof(u32)); status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size, @@ -1256,6 +1256,17 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev) rt2x00dev->hw->wiphy->n_iface_combinations = 1; } +static unsigned int rt2x00dev_extra_tx_headroom(struct rt2x00_dev *rt2x00dev) +{ + if (WARN_ON(!rt2x00dev->tx)) + return 0; + + if (rt2x00_is_usb(rt2x00dev)) + return rt2x00dev->tx[0].winfo_size + rt2x00dev->tx[0].desc_size; + + return rt2x00dev->tx[0].winfo_size; +} + /* * driver allocation handlers. */ @@ -1301,23 +1312,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) (rt2x00dev->ops->max_ap_intf - 1); /* - * Determine which operating modes are supported, all modes - * which require beaconing, depend on the availability of - * beacon entries. - */ - rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); - if (rt2x00dev->ops->bcn->entry_num > 0) - rt2x00dev->hw->wiphy->interface_modes |= - BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_AP) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_WDS); - - rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; - - /* * Initialize work. */ rt2x00dev->workqueue = @@ -1347,6 +1341,26 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) if (retval) goto exit; + /* Cache TX headroom value */ + rt2x00dev->extra_tx_headroom = rt2x00dev_extra_tx_headroom(rt2x00dev); + + /* + * Determine which operating modes are supported, all modes + * which require beaconing, depend on the availability of + * beacon entries. + */ + rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + if (rt2x00dev->bcn->limit > 0) + rt2x00dev->hw->wiphy->interface_modes |= + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_WDS); + + rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + /* * Initialize ieee80211 structure. */ diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index dc49e525ae5e..76d95deb274b 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -105,11 +105,13 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops) goto exit_release_regions; } + pci_enable_msi(pci_dev); + hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) { rt2x00_probe_err("Failed to allocate hardware\n"); retval = -ENOMEM; - goto exit_release_regions; + goto exit_disable_msi; } pci_set_drvdata(pci_dev, hw); @@ -150,6 +152,9 @@ exit_free_reg: exit_free_device: ieee80211_free_hw(hw); +exit_disable_msi: + pci_disable_msi(pci_dev); + exit_release_regions: pci_release_regions(pci_dev); @@ -174,6 +179,8 @@ void rt2x00pci_remove(struct pci_dev *pci_dev) rt2x00pci_free_reg(rt2x00dev); ieee80211_free_hw(hw); + pci_disable_msi(pci_dev); + /* * Free the PCI device data. */ diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 2c12311467a9..6c0a91ff963c 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -542,8 +542,8 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry, /* * Add the requested extra tx headroom in front of the skb. 
*/ - skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom); - memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom); + skb_push(entry->skb, rt2x00dev->extra_tx_headroom); + memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom); /* * Call the driver's write_tx_data function, if it exists. @@ -596,7 +596,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct ieee80211_bar *bar = (void *) (entry->skb->data + - rt2x00dev->ops->extra_tx_headroom); + rt2x00dev->extra_tx_headroom); struct rt2x00_bar_list_entry *bar_entry; if (likely(!ieee80211_is_back_req(bar->frame_control))) @@ -1161,8 +1161,7 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) } } -static int rt2x00queue_alloc_entries(struct data_queue *queue, - const struct data_queue_desc *qdesc) +static int rt2x00queue_alloc_entries(struct data_queue *queue) { struct queue_entry *entries; unsigned int entry_size; @@ -1170,16 +1169,10 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue, rt2x00queue_reset(queue); - queue->limit = qdesc->entry_num; - queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10); - queue->data_size = qdesc->data_size; - queue->desc_size = qdesc->desc_size; - queue->winfo_size = qdesc->winfo_size; - /* * Allocate all queue entries. */ - entry_size = sizeof(*entries) + qdesc->priv_size; + entry_size = sizeof(*entries) + queue->priv_size; entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); if (!entries) return -ENOMEM; @@ -1195,7 +1188,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue, entries[i].entry_idx = i; entries[i].priv_data = QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, - sizeof(*entries), qdesc->priv_size); + sizeof(*entries), queue->priv_size); } #undef QUEUE_ENTRY_PRIV_OFFSET @@ -1237,23 +1230,22 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev) struct data_queue *queue; int status; - status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx); + status = rt2x00queue_alloc_entries(rt2x00dev->rx); if (status) goto exit; tx_queue_for_each(rt2x00dev, queue) { - status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx); + status = rt2x00queue_alloc_entries(queue); if (status) goto exit; } - status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn); + status = rt2x00queue_alloc_entries(rt2x00dev->bcn); if (status) goto exit; if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) { - status = rt2x00queue_alloc_entries(rt2x00dev->atim, - rt2x00dev->ops->atim); + status = rt2x00queue_alloc_entries(rt2x00dev->atim); if (status) goto exit; } @@ -1297,6 +1289,10 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, queue->aifs = 2; queue->cw_min = 5; queue->cw_max = 10; + + rt2x00dev->ops->queue_init(queue); + + queue->threshold = DIV_ROUND_UP(queue->limit, 10); } int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h index 4a7b34e9261b..ebe117224979 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.h +++ b/drivers/net/wireless/rt2x00/rt2x00queue.h @@ -453,6 +453,7 @@ enum data_queue_flags { * @cw_max: The cw max value for outgoing frames (field ignored in RX queue). * @data_size: Maximum data size for the frames in this queue. * @desc_size: Hardware descriptor size for the data in this queue. + * @priv_size: Size of per-queue_entry private data. 
* @usb_endpoint: Device endpoint used for communication (USB only) * @usb_maxpacket: Max packet size for given endpoint (USB only) */ @@ -481,31 +482,13 @@ struct data_queue { unsigned short data_size; unsigned char desc_size; unsigned char winfo_size; + unsigned short priv_size; unsigned short usb_endpoint; unsigned short usb_maxpacket; }; /** - * struct data_queue_desc: Data queue description - * - * The information in this structure is used by drivers - * to inform rt2x00lib about the creation of the data queue. - * - * @entry_num: Maximum number of entries for a queue. - * @data_size: Maximum data size for the frames in this queue. - * @desc_size: Hardware descriptor size for the data in this queue. - * @priv_size: Size of per-queue_entry private data. - */ -struct data_queue_desc { - unsigned short entry_num; - unsigned short data_size; - unsigned char desc_size; - unsigned char winfo_size; - unsigned short priv_size; -}; - -/** * queue_end - Return pointer to the last queue (HELPER MACRO). * @__dev: Pointer to &struct rt2x00_dev * diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 0dc8180e251b..53754bc66d05 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -2175,7 +2175,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) * that the TX_STA_FIFO stack has a size of 16. We stick to our * tx ring size for now. */ - for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) { + for (i = 0; i < rt2x00dev->tx->limit; i++) { rt2x00mmio_register_read(rt2x00dev, STA_CSR4, &reg); if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) break; @@ -3025,26 +3025,40 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { .config = rt61pci_config, }; -static const struct data_queue_desc rt61pci_queue_rx = { - .entry_num = 32, - .data_size = DATA_FRAME_SIZE, - .desc_size = RXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; +static void rt61pci_queue_init(struct data_queue *queue) +{ + switch (queue->qid) { + case QID_RX: + queue->limit = 32; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = RXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; -static const struct data_queue_desc rt61pci_queue_tx = { - .entry_num = 32, - .data_size = DATA_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; + case QID_AC_VO: + case QID_AC_VI: + case QID_AC_BE: + case QID_AC_BK: + queue->limit = 32; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; -static const struct data_queue_desc rt61pci_queue_bcn = { - .entry_num = 4, - .data_size = 0, /* No DMA required for beacons */ - .desc_size = TXINFO_SIZE, - .priv_size = sizeof(struct queue_entry_priv_mmio), -}; + case QID_BEACON: + queue->limit = 4; + queue->data_size = 0; /* No DMA required for beacons */ + queue->desc_size = TXINFO_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_mmio); + break; + + case QID_ATIM: + /* fallthrough */ + default: + BUG(); + break; + } +} static const struct rt2x00_ops rt61pci_ops = { .name = KBUILD_MODNAME, @@ -3052,10 +3066,7 @@ static const struct rt2x00_ops rt61pci_ops = { .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, - .extra_tx_headroom = 0, - .rx = &rt61pci_queue_rx, - .tx = &rt61pci_queue_tx, - .bcn = &rt61pci_queue_bcn, + .queue_init = rt61pci_queue_init, .lib = &rt61pci_rt2x00_ops, .hw = &rt61pci_mac80211_ops, 
#ifdef CONFIG_RT2X00_LIB_DEBUGFS diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 377e09bb0b81..1616ed484ceb 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2359,26 +2359,40 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { .config = rt73usb_config, }; -static const struct data_queue_desc rt73usb_queue_rx = { - .entry_num = 32, - .data_size = DATA_FRAME_SIZE, - .desc_size = RXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; +static void rt73usb_queue_init(struct data_queue *queue) +{ + switch (queue->qid) { + case QID_RX: + queue->limit = 32; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = RXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_usb); + break; -static const struct data_queue_desc rt73usb_queue_tx = { - .entry_num = 32, - .data_size = DATA_FRAME_SIZE, - .desc_size = TXD_DESC_SIZE, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; + case QID_AC_VO: + case QID_AC_VI: + case QID_AC_BE: + case QID_AC_BK: + queue->limit = 32; + queue->data_size = DATA_FRAME_SIZE; + queue->desc_size = TXD_DESC_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_usb); + break; -static const struct data_queue_desc rt73usb_queue_bcn = { - .entry_num = 4, - .data_size = MGMT_FRAME_SIZE, - .desc_size = TXINFO_SIZE, - .priv_size = sizeof(struct queue_entry_priv_usb), -}; + case QID_BEACON: + queue->limit = 4; + queue->data_size = MGMT_FRAME_SIZE; + queue->desc_size = TXINFO_SIZE; + queue->priv_size = sizeof(struct queue_entry_priv_usb); + break; + + case QID_ATIM: + /* fallthrough */ + default: + BUG(); + break; + } +} static const struct rt2x00_ops rt73usb_ops = { .name = KBUILD_MODNAME, @@ -2386,10 +2400,7 @@ static const struct rt2x00_ops rt73usb_ops = { .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, - .extra_tx_headroom = TXD_DESC_SIZE, - .rx = &rt73usb_queue_rx, - .tx = &rt73usb_queue_tx, - .bcn = &rt73usb_queue_bcn, + .queue_init = rt73usb_queue_init, .lib = &rt73usb_rt2x00_ops, .hw = &rt73usb_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 826f085c29dd..2bd598526217 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -359,6 +359,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ + {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/ {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/ {} }; diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c index 19a765532603..47875ba09ff8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c @@ -842,7 +842,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter( long val_y, ele_c = 0; u8 ofdm_index[2]; s8 cck_index = 0; - u8 ofdm_index_old[2]; + u8 ofdm_index_old[2] = {0, 0}; s8 cck_index_old = 0; u8 index; int i; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 953111a502ee..796928ba875f 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6018,6 +6018,15 @@ int 
wlcore_free_hw(struct wl1271 *wl) } EXPORT_SYMBOL_GPL(wlcore_free_hw); +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support wlcore_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY, + .n_patterns = WL1271_MAX_RX_FILTERS, + .pattern_min_len = 1, + .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE, +}; +#endif + static void wlcore_nvs_cb(const struct firmware *fw, void *context) { struct wl1271 *wl = context; @@ -6071,14 +6080,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) if (!ret) { wl->irq_wake_enabled = true; device_init_wakeup(wl->dev, 1); - if (pdata->pwr_in_suspend) { - wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; - wl->hw->wiphy->wowlan.n_patterns = - WL1271_MAX_RX_FILTERS; - wl->hw->wiphy->wowlan.pattern_min_len = 1; - wl->hw->wiphy->wowlan.pattern_max_len = - WL1271_RX_FILTER_MAX_PATTERN_SIZE; - } + if (pdata->pwr_in_suspend) + wl->hw->wiphy->wowlan = &wlcore_wowlan_support; } #endif disable_irq(wl->irq); diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index 74a852e4e41f..b0b64ccb7d7d 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig @@ -36,6 +36,16 @@ config NFC_MEI_PHY If unsure, say N. +config NFC_SIM + tristate "NFC hardware simulator driver" + help + This driver declares two virtual NFC devices supporting NFC-DEP + protocol. An LLCP connection can be established between them and + all packets sent from one device are sent back to the other, acting as + loopback devices. + + If unsure, say N. + source "drivers/nfc/pn544/Kconfig" source "drivers/nfc/microread/Kconfig" diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile index aa6bd657ef40..be7636abcb3f 100644 --- a/drivers/nfc/Makefile +++ b/drivers/nfc/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_NFC_MICROREAD) += microread/ obj-$(CONFIG_NFC_PN533) += pn533.o obj-$(CONFIG_NFC_WILINK) += nfcwilink.o obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o +obj-$(CONFIG_NFC_SIM) += nfcsim.o ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c index 1201bdbfb791..606bf55e76ec 100644 --- a/drivers/nfc/mei_phy.c +++ b/drivers/nfc/mei_phy.c @@ -30,7 +30,7 @@ struct mei_nfc_hdr { u16 req_id; u32 reserved; u16 data_size; -} __attribute__((packed)); +} __packed; #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) @@ -60,8 +60,8 @@ int nfc_mei_phy_enable(void *phy_id) r = mei_cl_enable_device(phy->device); if (r < 0) { - pr_err("MEI_PHY: Could not enable device\n"); - return r; + pr_err("MEI_PHY: Could not enable device\n"); + return r; } r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy); diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c index 3420d833db17..cdb9f6de132a 100644 @@ -650,7 +650,7 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, { struct microread_info *info; unsigned long quirks = 0; - u32 protocols, se; + u32 protocols; struct nfc_hci_init_data init_data; int r; @@ -678,10 +678,8 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_NFC_DEP_MASK; - se = NFC_SE_UICC | NFC_SE_EMBEDDED; - info->hdev = nfc_hci_allocate_device(&microread_hci_ops, &init_data, - quirks, protocols, se, llc_name, + quirks, protocols, llc_name, phy_headroom + MICROREAD_CMDS_HEADROOM, phy_tailroom + diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c new file mode 100644 index 000000000000..c5c30fb1d7bf --- /dev/null +++ 
b/drivers/nfc/nfcsim.c @@ -0,0 +1,541 @@ +/* + * NFC hardware simulation driver + * Copyright (c) 2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/nfc.h> +#include <net/nfc/nfc.h> + +#define DEV_ERR(_dev, fmt, args...) nfc_dev_err(&_dev->nfc_dev->dev, \ + "%s: " fmt, __func__, ## args) + +#define DEV_DBG(_dev, fmt, args...) nfc_dev_dbg(&_dev->nfc_dev->dev, \ + "%s: " fmt, __func__, ## args) + +#define NFCSIM_VERSION "0.1" + +#define NFCSIM_POLL_NONE 0 +#define NFCSIM_POLL_INITIATOR 1 +#define NFCSIM_POLL_TARGET 2 +#define NFCSIM_POLL_DUAL (NFCSIM_POLL_INITIATOR | NFCSIM_POLL_TARGET) + +struct nfcsim { + struct nfc_dev *nfc_dev; + + struct mutex lock; + + struct delayed_work recv_work; + + struct sk_buff *clone_skb; + + struct delayed_work poll_work; + u8 polling_mode; + u8 curr_polling_mode; + + u8 shutting_down; + + u8 up; + + u8 initiator; + + data_exchange_cb_t cb; + void *cb_context; + + struct nfcsim *peer_dev; +}; + +static struct nfcsim *dev0; +static struct nfcsim *dev1; + +struct workqueue_struct *wq; + +static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown) +{ + DEV_DBG(dev, "shutdown=%d", shutdown); + + mutex_lock(&dev->lock); + + dev->polling_mode = NFCSIM_POLL_NONE; + dev->shutting_down = shutdown; + dev->cb = NULL; + dev_kfree_skb(dev->clone_skb); + dev->clone_skb = NULL; + + mutex_unlock(&dev->lock); + + cancel_delayed_work_sync(&dev->poll_work); + cancel_delayed_work_sync(&dev->recv_work); +} + +static int nfcsim_target_found(struct nfcsim *dev) +{ + struct nfc_target nfc_tgt; + + DEV_DBG(dev, ""); + + memset(&nfc_tgt, 0, sizeof(struct nfc_target)); + + nfc_tgt.supported_protocols = NFC_PROTO_NFC_DEP_MASK; + nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1); + + return 0; +} + +static int nfcsim_dev_up(struct nfc_dev *nfc_dev) +{ + struct nfcsim *dev = nfc_get_drvdata(nfc_dev); + + DEV_DBG(dev, ""); + + mutex_lock(&dev->lock); + + dev->up = 1; + + mutex_unlock(&dev->lock); + + return 0; +} + +static int nfcsim_dev_down(struct nfc_dev *nfc_dev) +{ + struct nfcsim *dev = nfc_get_drvdata(nfc_dev); + + DEV_DBG(dev, ""); + + mutex_lock(&dev->lock); + + dev->up = 0; + + mutex_unlock(&dev->lock); + + return 0; +} + +static int nfcsim_dep_link_up(struct nfc_dev *nfc_dev, + struct nfc_target *target, + u8 comm_mode, u8 *gb, size_t gb_len) +{ + int rc; + struct nfcsim *dev = nfc_get_drvdata(nfc_dev); + struct nfcsim *peer = dev->peer_dev; + u8 *remote_gb; + size_t remote_gb_len; + + DEV_DBG(dev, "target_idx: %d, comm_mode: %d\n", target->idx, comm_mode); + + mutex_lock(&peer->lock); + + nfc_tm_activated(peer->nfc_dev, NFC_PROTO_NFC_DEP_MASK, + NFC_COMM_ACTIVE, gb, gb_len); + + remote_gb = nfc_get_local_general_bytes(peer->nfc_dev, &remote_gb_len); + if (!remote_gb) { + DEV_ERR(peer, "Can't get remote general bytes"); + + mutex_unlock(&peer->lock); + return -EINVAL; + } + + mutex_unlock(&peer->lock); + + mutex_lock(&dev->lock); + + rc = nfc_set_remote_general_bytes(nfc_dev, remote_gb, remote_gb_len); + if (rc) { + DEV_ERR(dev, "Can't set remote 
general bytes"); + mutex_unlock(&dev->lock); + return rc; + } + + rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_ACTIVE, + NFC_RF_INITIATOR); + + mutex_unlock(&dev->lock); + + return rc; +} + +static int nfcsim_dep_link_down(struct nfc_dev *nfc_dev) +{ + struct nfcsim *dev = nfc_get_drvdata(nfc_dev); + + DEV_DBG(dev, ""); + + nfcsim_cleanup_dev(dev, 0); + + return 0; +} + +static int nfcsim_start_poll(struct nfc_dev *nfc_dev, + u32 im_protocols, u32 tm_protocols) +{ + struct nfcsim *dev = nfc_get_drvdata(nfc_dev); + int rc; + + mutex_lock(&dev->lock); + + if (dev->polling_mode != NFCSIM_POLL_NONE) { + DEV_ERR(dev, "Already in polling mode"); + rc = -EBUSY; + goto exit; + } + + if (im_protocols & NFC_PROTO_NFC_DEP_MASK) + dev->polling_mode |= NFCSIM_POLL_INITIATOR; + + if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) + dev->polling_mode |= NFCSIM_POLL_TARGET; + + if (dev->polling_mode == NFCSIM_POLL_NONE) { + DEV_ERR(dev, "Unsupported polling mode"); + rc = -EINVAL; + goto exit; + } + + dev->initiator = 0; + dev->curr_polling_mode = NFCSIM_POLL_NONE; + + queue_delayed_work(wq, &dev->poll_work, 0); + + DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X", im_protocols, + tm_protocols); + + rc = 0; +exit: + mutex_unlock(&dev->lock); + + return rc; +} + +static void nfcsim_stop_poll(struct nfc_dev *nfc_dev) +{ + struct nfcsim *dev = nfc_get_drvdata(nfc_dev); + + DEV_DBG(dev, "Stop poll"); + + mutex_lock(&dev->lock); + + dev->polling_mode = NFCSIM_POLL_NONE; + + mutex_unlock(&dev->lock); + + cancel_delayed_work_sync(&dev->poll_work); +} + +static int nfcsim_activate_target(struct nfc_dev *nfc_dev, + struct nfc_target *target, u32 protocol) +{ + struct nfcsim *dev = nfc_get_drvdata(nfc_dev); + + DEV_DBG(dev, ""); + + return -ENOTSUPP; +} + +static void nfcsim_deactivate_target(struct nfc_dev *nfc_dev, + struct nfc_target *target) +{ + struct nfcsim *dev = nfc_get_drvdata(nfc_dev); + + DEV_DBG(dev, ""); +} + +static void nfcsim_wq_recv(struct work_struct *work) +{ + struct nfcsim *dev = container_of(work, struct nfcsim, + recv_work.work); + + mutex_lock(&dev->lock); + + if (dev->shutting_down || !dev->up || !dev->clone_skb) { + dev_kfree_skb(dev->clone_skb); + goto exit; + } + + if (dev->initiator) { + if (!dev->cb) { + DEV_ERR(dev, "Null recv callback"); + dev_kfree_skb(dev->clone_skb); + goto exit; + } + + dev->cb(dev->cb_context, dev->clone_skb, 0); + dev->cb = NULL; + } else { + nfc_tm_data_received(dev->nfc_dev, dev->clone_skb); + } + +exit: + dev->clone_skb = NULL; + + mutex_unlock(&dev->lock); +} + +static int nfcsim_tx(struct nfc_dev *nfc_dev, struct nfc_target *target, + struct sk_buff *skb, data_exchange_cb_t cb, + void *cb_context) +{ + struct nfcsim *dev = nfc_get_drvdata(nfc_dev); + struct nfcsim *peer = dev->peer_dev; + int err; + + mutex_lock(&dev->lock); + + if (dev->shutting_down || !dev->up) { + mutex_unlock(&dev->lock); + err = -ENODEV; + goto exit; + } + + dev->cb = cb; + dev->cb_context = cb_context; + + mutex_unlock(&dev->lock); + + mutex_lock(&peer->lock); + + peer->clone_skb = skb_clone(skb, GFP_KERNEL); + + if (!peer->clone_skb) { + DEV_ERR(dev, "skb_clone failed"); + mutex_unlock(&peer->lock); + err = -ENOMEM; + goto exit; + } + + /* This simulates an arbitrary transmission delay between the 2 devices. + * If packet transmission occurs immediately between them, we have a + * non-stop flow of several tens of thousands SYMM packets per second + * and a burning cpu. + * + * TODO: Add support for a sysfs entry to control this delay. 
+ */ + queue_delayed_work(wq, &peer->recv_work, msecs_to_jiffies(5)); + + mutex_unlock(&peer->lock); + + err = 0; +exit: + dev_kfree_skb(skb); + + return err; +} + +static int nfcsim_im_transceive(struct nfc_dev *nfc_dev, + struct nfc_target *target, struct sk_buff *skb, + data_exchange_cb_t cb, void *cb_context) +{ + return nfcsim_tx(nfc_dev, target, skb, cb, cb_context); +} + +static int nfcsim_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) +{ + return nfcsim_tx(nfc_dev, NULL, skb, NULL, NULL); +} + +static struct nfc_ops nfcsim_nfc_ops = { + .dev_up = nfcsim_dev_up, + .dev_down = nfcsim_dev_down, + .dep_link_up = nfcsim_dep_link_up, + .dep_link_down = nfcsim_dep_link_down, + .start_poll = nfcsim_start_poll, + .stop_poll = nfcsim_stop_poll, + .activate_target = nfcsim_activate_target, + .deactivate_target = nfcsim_deactivate_target, + .im_transceive = nfcsim_im_transceive, + .tm_send = nfcsim_tm_send, +}; + +static void nfcsim_set_polling_mode(struct nfcsim *dev) +{ + if (dev->polling_mode == NFCSIM_POLL_NONE) { + dev->curr_polling_mode = NFCSIM_POLL_NONE; + return; + } + + if (dev->curr_polling_mode == NFCSIM_POLL_NONE) { + if (dev->polling_mode & NFCSIM_POLL_INITIATOR) + dev->curr_polling_mode = NFCSIM_POLL_INITIATOR; + else + dev->curr_polling_mode = NFCSIM_POLL_TARGET; + + return; + } + + if (dev->polling_mode == NFCSIM_POLL_DUAL) { + if (dev->curr_polling_mode == NFCSIM_POLL_TARGET) + dev->curr_polling_mode = NFCSIM_POLL_INITIATOR; + else + dev->curr_polling_mode = NFCSIM_POLL_TARGET; + } +} + +static void nfcsim_wq_poll(struct work_struct *work) +{ + struct nfcsim *dev = container_of(work, struct nfcsim, poll_work.work); + struct nfcsim *peer = dev->peer_dev; + + /* These work items run on an ordered workqueue and are therefore + * serialized. So we can take both mutexes without being dead locked. + */ + mutex_lock(&dev->lock); + mutex_lock(&peer->lock); + + nfcsim_set_polling_mode(dev); + + if (dev->curr_polling_mode == NFCSIM_POLL_NONE) { + DEV_DBG(dev, "Not polling"); + goto unlock; + } + + DEV_DBG(dev, "Polling as %s", + dev->curr_polling_mode == NFCSIM_POLL_INITIATOR ? + "initiator" : "target"); + + if (dev->curr_polling_mode == NFCSIM_POLL_TARGET) + goto sched_work; + + if (peer->curr_polling_mode == NFCSIM_POLL_TARGET) { + peer->polling_mode = NFCSIM_POLL_NONE; + dev->polling_mode = NFCSIM_POLL_NONE; + + dev->initiator = 1; + + nfcsim_target_found(dev); + + goto unlock; + } + +sched_work: + /* This defines the delay for an initiator to check if the other device + * is polling in target mode. + * If the device starts in dual mode polling, it switches between + * initiator and target at every round. + * Because the wq is ordered and only 1 work item is executed at a time, + * we'll always have one device polling as initiator and the other as + * target at some point, even if both are started in dual mode. 
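+ * + * One possible interleaving with both devices started in dual mode: + * dev0 polls as initiator, then dev1 polls as initiator; 200 ms later + * dev0 switches to target mode, then dev1 does the same; on the next + * round dev0 switches back to initiator, finds dev1 still in target + * mode and calls nfcsim_target_found(), after which both polling modes + * are reset and the rounds stop.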
+ */ + queue_delayed_work(wq, &dev->poll_work, msecs_to_jiffies(200)); + +unlock: + mutex_unlock(&peer->lock); + mutex_unlock(&dev->lock); +} + +static struct nfcsim *nfcsim_init_dev(void) +{ + struct nfcsim *dev; + int rc = -ENOMEM; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (dev == NULL) + return ERR_PTR(-ENOMEM); + + mutex_init(&dev->lock); + + INIT_DELAYED_WORK(&dev->recv_work, nfcsim_wq_recv); + INIT_DELAYED_WORK(&dev->poll_work, nfcsim_wq_poll); + + dev->nfc_dev = nfc_allocate_device(&nfcsim_nfc_ops, + NFC_PROTO_NFC_DEP_MASK, + 0, 0); + if (!dev->nfc_dev) + goto error; + + nfc_set_drvdata(dev->nfc_dev, dev); + + rc = nfc_register_device(dev->nfc_dev); + if (rc) + goto free_nfc_dev; + + return dev; + +free_nfc_dev: + nfc_free_device(dev->nfc_dev); + +error: + kfree(dev); + + return ERR_PTR(rc); +} + +static void nfcsim_free_device(struct nfcsim *dev) +{ + nfc_unregister_device(dev->nfc_dev); + + nfc_free_device(dev->nfc_dev); + + kfree(dev); +} + +int __init nfcsim_init(void) +{ + int rc; + + /* We need an ordered wq to ensure that poll_work items are executed + * one at a time. + */ + wq = alloc_ordered_workqueue("nfcsim", 0); + if (!wq) { + rc = -ENOMEM; + goto exit; + } + + dev0 = nfcsim_init_dev(); + if (IS_ERR(dev0)) { + rc = PTR_ERR(dev0); + goto exit; + } + + dev1 = nfcsim_init_dev(); + if (IS_ERR(dev1)) { + kfree(dev0); + + rc = PTR_ERR(dev1); + goto exit; + } + + dev0->peer_dev = dev1; + dev1->peer_dev = dev0; + + pr_debug("NFCsim " NFCSIM_VERSION " initialized\n"); + + rc = 0; +exit: + if (rc) + pr_err("Failed to initialize nfcsim driver (%d)\n", + rc); + + return rc; +} + +void __exit nfcsim_exit(void) +{ + nfcsim_cleanup_dev(dev0, 1); + nfcsim_cleanup_dev(dev1, 1); + + nfcsim_free_device(dev0); + nfcsim_free_device(dev1); + + destroy_workqueue(wq); +} + +module_init(nfcsim_init); +module_exit(nfcsim_exit); + +MODULE_DESCRIPTION("NFCSim driver ver " NFCSIM_VERSION); +MODULE_VERSION(NFCSIM_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c index 3b731acbc408..59f95d8fc98c 100644 --- a/drivers/nfc/nfcwilink.c +++ b/drivers/nfc/nfcwilink.c @@ -109,7 +109,7 @@ enum { NFCWILINK_FW_DOWNLOAD, }; -static int nfcwilink_send(struct sk_buff *skb); +static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb); static inline struct sk_buff *nfcwilink_skb_alloc(unsigned int len, gfp_t how) { @@ -156,8 +156,6 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name) return -ENOMEM; } - skb->dev = (void *)drv->ndev; - cmd = (struct nci_vs_nfcc_info_cmd *) skb_put(skb, sizeof(struct nci_vs_nfcc_info_cmd)); cmd->gid = NCI_VS_NFCC_INFO_CMD_GID; @@ -166,7 +164,7 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name) drv->nfcc_info.plen = 0; - rc = nfcwilink_send(skb); + rc = nfcwilink_send(drv->ndev, skb); if (rc) return rc; @@ -232,11 +230,9 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len) return -ENOMEM; } - skb->dev = (void *)drv->ndev; - memcpy(skb_put(skb, len), data, len); - rc = nfcwilink_send(skb); + rc = nfcwilink_send(drv->ndev, skb); if (rc) return rc; @@ -371,10 +367,8 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb) return 0; } - skb->dev = (void *) drv->ndev; - /* Forward skb to NCI core layer */ - rc = nci_recv_frame(skb); + rc = nci_recv_frame(drv->ndev, skb); if (rc < 0) { nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc); return rc; @@ -480,9 +474,8 @@ static int nfcwilink_close(struct nci_dev *ndev) 
return rc; } -static int nfcwilink_send(struct sk_buff *skb) +static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb) { - struct nci_dev *ndev = (struct nci_dev *)skb->dev; struct nfcwilink *drv = nci_get_drvdata(ndev); struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000}; long len; @@ -542,7 +535,6 @@ static int nfcwilink_probe(struct platform_device *pdev) drv->ndev = nci_allocate_device(&nfcwilink_ops, protocols, - NFC_SE_NONE, NFCWILINK_HDR_LEN, 0); if (!drv->ndev) { diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 8f6f2baa930d..bfb4a4e7c604 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -258,7 +258,7 @@ static const struct pn533_poll_modulations poll_mod[] = { .opcode = PN533_FELICA_OPC_SENSF_REQ, .sc = PN533_FELICA_SENSF_SC_ALL, .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, - .tsn = 0, + .tsn = 0x03, }, }, .len = 7, @@ -271,7 +271,7 @@ static const struct pn533_poll_modulations poll_mod[] = { .opcode = PN533_FELICA_OPC_SENSF_REQ, .sc = PN533_FELICA_SENSF_SC_ALL, .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, - .tsn = 0, + .tsn = 0x03, }, }, .len = 7, @@ -1235,7 +1235,7 @@ static int pn533_target_found_type_a(struct nfc_target *nfc_tgt, u8 *tgt_data, struct pn533_target_felica { u8 pol_res; u8 opcode; - u8 nfcid2[8]; + u8 nfcid2[NFC_NFCID2_MAXSIZE]; u8 pad[8]; /* optional */ u8 syst_code[]; @@ -1275,6 +1275,9 @@ static int pn533_target_found_felica(struct nfc_target *nfc_tgt, u8 *tgt_data, memcpy(nfc_tgt->sensf_res, &tgt_felica->opcode, 9); nfc_tgt->sensf_res_len = 9; + memcpy(nfc_tgt->nfcid2, tgt_felica->nfcid2, NFC_NFCID2_MAXSIZE); + nfc_tgt->nfcid2_len = NFC_NFCID2_MAXSIZE; + return 0; } @@ -2084,6 +2087,9 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, if (comm_mode == NFC_COMM_PASSIVE) skb_len += PASSIVE_DATA_LEN; + if (target && target->nfcid2_len) + skb_len += NFC_NFCID3_MAXSIZE; + skb = pn533_alloc_skb(dev, skb_len); if (!skb) return -ENOMEM; @@ -2100,6 +2106,12 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, *next |= 1; } + if (target && target->nfcid2_len) { + memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2, + target->nfcid2_len); + *next |= 2; + } + if (gb != NULL && gb_len > 0) { memcpy(skb_put(skb, gb_len), gb, gb_len); *next |= 4; /* We have some Gi */ @@ -2489,7 +2501,7 @@ static void pn533_acr122_poweron_rdr_resp(struct urb *urb) nfc_dev_dbg(&urb->dev->dev, "%s", __func__); - print_hex_dump(KERN_ERR, "ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1, + print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1, urb->transfer_buffer, urb->transfer_buffer_length, false); @@ -2520,7 +2532,7 @@ static int pn533_acr122_poweron_rdr(struct pn533 *dev) dev->out_urb->transfer_buffer = cmd; dev->out_urb->transfer_buffer_length = sizeof(cmd); - print_hex_dump(KERN_ERR, "ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1, + print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1, cmd, sizeof(cmd), false); rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); @@ -2774,17 +2786,18 @@ static int pn533_probe(struct usb_interface *interface, goto destroy_wq; nfc_dev_info(&dev->interface->dev, - "NXP PN533 firmware ver %d.%d now attached", - fw_ver.ver, fw_ver.rev); + "NXP PN5%02X firmware ver %d.%d now attached", + fw_ver.ic, fw_ver.ver, fw_ver.rev); dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols, - NFC_SE_NONE, dev->ops->tx_header_len + PN533_CMD_DATAEXCH_HEAD_LEN, dev->ops->tx_tail_len); - if (!dev->nfc_dev) + if (!dev->nfc_dev) { + rc = -ENOMEM; goto destroy_wq; + 
} nfc_set_parent_dev(dev->nfc_dev, &interface->dev); nfc_set_drvdata(dev->nfc_dev, dev); diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c index 9c5f16e7baef..0d17da7675b7 100644 --- a/drivers/nfc/pn544/pn544.c +++ b/drivers/nfc/pn544/pn544.c @@ -551,20 +551,25 @@ static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev, return -EPROTO; } - r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE, - PN544_RF_READER_CMD_ACTIVATE_NEXT, - uid_skb->data, uid_skb->len, NULL); - kfree_skb(uid_skb); - - r = nfc_hci_send_cmd(hdev, + /* Type F NFC-DEP IDm has prefix 0x01FE */ + if ((uid_skb->data[0] == 0x01) && (uid_skb->data[1] == 0xfe)) { + kfree_skb(uid_skb); + r = nfc_hci_send_cmd(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE, PN544_HCI_CMD_CONTINUE_ACTIVATION, NULL, 0, NULL); - if (r < 0) - return r; + if (r < 0) + return r; - target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE; - target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; + target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; + target->hci_reader_gate = + PN544_RF_READER_NFCIP1_INITIATOR_GATE; + } else { + r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE, + PN544_RF_READER_CMD_ACTIVATE_NEXT, + uid_skb->data, uid_skb->len, NULL); + kfree_skb(uid_skb); + } } else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) { /* * TODO: maybe other ISO 14443 require some kind of continue @@ -706,12 +711,9 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev, return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE, PN544_RF_READER_CMD_ACTIVATE_NEXT, target->nfcid1, target->nfcid1_len, NULL); - } else if (target->supported_protocols & NFC_PROTO_JEWEL_MASK) { - return nfc_hci_send_cmd(hdev, target->hci_reader_gate, - PN544_JEWEL_RAW_CMD, NULL, 0, NULL); - } else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) { - return nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE, - PN544_FELICA_RAW, NULL, 0, NULL); + } else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK | + NFC_PROTO_FELICA_MASK)) { + return -EOPNOTSUPP; } else if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) { return nfc_hci_send_cmd(hdev, target->hci_reader_gate, PN544_HCI_CMD_ATTREQUEST, @@ -801,7 +803,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, struct nfc_hci_dev **hdev) { struct pn544_hci_info *info; - u32 protocols, se; + u32 protocols; struct nfc_hci_init_data init_data; int r; @@ -834,10 +836,8 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_NFC_DEP_MASK; - se = NFC_SE_UICC | NFC_SE_EMBEDDED; - info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data, 0, - protocols, se, llc_name, + protocols, llc_name, phy_headroom + PN544_CMDS_HEADROOM, phy_tailroom, phy_payload); if (!info->hdev) { diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index a3a851e49321..18c0d8d1ddf7 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c @@ -68,12 +68,6 @@ MODULE_LICENSE("Dual MPL/GPL"); #if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) -/* The RPX series use SLOT_B */ -#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE) -#define CONFIG_PCMCIA_SLOT_B -#define CONFIG_BD_IS_MHZ -#endif - /* The ADS board use SLOT_A */ #ifdef CONFIG_ADS #define CONFIG_PCMCIA_SLOT_A @@ -253,81 +247,6 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev); #define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */ -/* 
------------------------------------------------------------------------- */ -/* board specific stuff: */ -/* voltage_set(), hardware_enable() and hardware_disable() */ -/* ------------------------------------------------------------------------- */ -/* RPX Boards from Embedded Planet */ - -#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE) - -/* The RPX boards seems to have it's bus monitor timeout set to 6*8 clocks. - * SYPCR is write once only, therefore must the slowest memory be faster - * than the bus monitor or we will get a machine check due to the bus timeout. - */ - -#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE" - -#undef PCMCIA_BMT_LIMIT -#define PCMCIA_BMT_LIMIT (6*8) - -static int voltage_set(int slot, int vcc, int vpp) -{ - u32 reg = 0; - - switch (vcc) { - case 0: - break; - case 33: - reg |= BCSR1_PCVCTL4; - break; - case 50: - reg |= BCSR1_PCVCTL5; - break; - default: - return 1; - } - - switch (vpp) { - case 0: - break; - case 33: - case 50: - if (vcc == vpp) - reg |= BCSR1_PCVCTL6; - else - return 1; - break; - case 120: - reg |= BCSR1_PCVCTL7; - default: - return 1; - } - - if (!((vcc == 50) || (vcc == 0))) - return 1; - - /* first, turn off all power */ - - out_be32(((u32 *) RPX_CSR_ADDR), - in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | - BCSR1_PCVCTL5 | - BCSR1_PCVCTL6 | - BCSR1_PCVCTL7)); - - /* enable new powersettings */ - - out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg); - - return 0; -} - -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V -#define hardware_enable(_slot_) /* No hardware to enable */ -#define hardware_disable(_slot_) /* No hardware to disable */ - -#endif /* CONFIG_RPXCLASSIC */ - /* FADS Boards from Motorola */ #if defined(CONFIG_FADS) @@ -419,65 +338,6 @@ static inline int voltage_set(int slot, int vcc, int vpp) #endif -/* ------------------------------------------------------------------------- */ -/* Motorola MBX860 */ - -#if defined(CONFIG_MBX) - -#define PCMCIA_BOARD_MSG "MBX" - -static int voltage_set(int slot, int vcc, int vpp) -{ - u8 reg = 0; - - switch (vcc) { - case 0: - break; - case 33: - reg |= CSR2_VCC_33; - break; - case 50: - reg |= CSR2_VCC_50; - break; - default: - return 1; - } - - switch (vpp) { - case 0: - break; - case 33: - case 50: - if (vcc == vpp) - reg |= CSR2_VPP_VCC; - else - return 1; - break; - case 120: - if ((vcc == 33) || (vcc == 50)) - reg |= CSR2_VPP_12; - else - return 1; - default: - return 1; - } - - /* first, turn off all power */ - out_8((u8 *) MBX_CSR2_ADDR, - in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK)); - - /* enable new powersettings */ - out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg); - - return 0; -} - -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V -#define hardware_enable(_slot_) /* No hardware to enable */ -#define hardware_disable(_slot_) /* No hardware to disable */ - -#endif /* CONFIG_MBX */ - #if defined(CONFIG_PRxK) #include <asm/cpld.h> extern volatile fpga_pc_regs *fpga_pc; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 3338437b559b..85772616efbf 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -781,4 +781,12 @@ config APPLE_GMUX graphics as well as the backlight. Currently only backlight control is supported by the driver. +config PVPANIC + tristate "pvpanic device support" + depends on ACPI + ---help--- + This driver provides support for the pvpanic device. 
pvpanic is + a paravirtualized device provided by QEMU; it lets a virtual machine + (guest) communicate panic events to the host. + endif # X86_PLATFORM_DEVICES diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index ace2b38942fe..ef0ec746f78c 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -51,3 +51,5 @@ obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o + +obj-$(CONFIG_PVPANIC) += pvpanic.o diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 210b5b872125..8fcb41e18b9c 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -171,6 +171,15 @@ static struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_x401u, }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X75A", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X75A"), + }, + .driver_data = &quirk_asus_x401u, + }, {}, }; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index fa3ee6209572..1134119521ac 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -284,6 +284,7 @@ static void __init parse_da_table(const struct dmi_header *dm) { /* Final token is a terminator, so we don't want to copy it */ int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1; + struct calling_interface_token *new_da_tokens; struct calling_interface_structure *table = container_of(dm, struct calling_interface_structure, header); @@ -296,12 +297,13 @@ static void __init parse_da_table(const struct dmi_header *dm) da_command_address = table->cmdIOAddress; da_command_code = table->cmdIOCode; - da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * - sizeof(struct calling_interface_token), - GFP_KERNEL); + new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * + sizeof(struct calling_interface_token), + GFP_KERNEL); - if (!da_tokens) + if (!new_da_tokens) return; + da_tokens = new_da_tokens; memcpy(da_tokens+da_num_tokens, table->tokens, sizeof(struct calling_interface_token) * tokens); diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c index 3f945457f71c..bcf8cc6b5537 100644 --- a/drivers/platform/x86/dell-wmi-aio.c +++ b/drivers/platform/x86/dell-wmi-aio.c @@ -34,6 +34,14 @@ MODULE_LICENSE("GPL"); #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4" #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8" +struct dell_wmi_event { + u16 length; + /* 0x000: A hot key pressed or an event occurred + * 0x00F: A sequence of hot keys are pressed */ + u16 type; + u16 event[]; +}; + static const char *dell_wmi_aio_guids[] = { EVENT_GUID1, EVENT_GUID2, @@ -46,15 +54,41 @@ MODULE_ALIAS("wmi:"EVENT_GUID2); static const struct key_entry dell_wmi_aio_keymap[] = { { KE_KEY, 0xc0, { KEY_VOLUMEUP } }, { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } }, + { KE_KEY, 0xe030, { KEY_VOLUMEUP } }, + { KE_KEY, 0xe02e, { KEY_VOLUMEDOWN } }, + { KE_KEY, 0xe020, { KEY_MUTE } }, + { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } }, + { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } }, + { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } }, + { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } }, { KE_END, 0 } }; static struct input_dev *dell_wmi_aio_input_dev; +/* + * The new WMI event data format will follow the dell_wmi_event structure + * So, we will 
check if the buffer matches the format + */ +static bool dell_wmi_aio_event_check(u8 *buffer, int length) +{ + struct dell_wmi_event *event = (struct dell_wmi_event *)buffer; + + if (event == NULL || length < 6) + return false; + + if ((event->type == 0 || event->type == 0xf) && + event->length >= 2) + return true; + + return false; +} + static void dell_wmi_aio_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; + struct dell_wmi_event *event; acpi_status status; status = wmi_get_event_data(value, &response); @@ -65,7 +99,7 @@ static void dell_wmi_aio_notify(u32 value, void *context) obj = (union acpi_object *)response.pointer; if (obj) { - unsigned int scancode; + unsigned int scancode = 0; switch (obj->type) { case ACPI_TYPE_INTEGER: @@ -75,13 +109,22 @@ static void dell_wmi_aio_notify(u32 value, void *context) scancode, 1, true); break; case ACPI_TYPE_BUFFER: - /* Broken machines return the scancode in a buffer */ - if (obj->buffer.pointer && obj->buffer.length > 0) { - scancode = obj->buffer.pointer[0]; + if (dell_wmi_aio_event_check(obj->buffer.pointer, + obj->buffer.length)) { + event = (struct dell_wmi_event *) + obj->buffer.pointer; + scancode = event->event[0]; + } else { + /* Broken machines return the scancode in a + buffer */ + if (obj->buffer.pointer && + obj->buffer.length > 0) + scancode = obj->buffer.pointer[0]; + } + if (scancode) sparse_keymap_report_event( dell_wmi_aio_input_dev, scancode, 1, true); - } break; } } diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 1a779bbfb87d..8df0c5a21be2 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -71,6 +71,14 @@ enum hp_wmi_event_ids { HPWMI_WIRELESS = 5, HPWMI_CPU_BATTERY_THROTTLE = 6, HPWMI_LOCK_SWITCH = 7, + HPWMI_LID_SWITCH = 8, + HPWMI_SCREEN_ROTATION = 9, + HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A, + HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B, + HPWMI_PROXIMITY_SENSOR = 0x0C, + HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D, + HPWMI_PEAKSHIFT_PERIOD = 0x0F, + HPWMI_BATTERY_CHARGE_PERIOD = 0x10, }; struct bios_args { @@ -536,6 +544,22 @@ static void hp_wmi_notify(u32 value, void *context) break; case HPWMI_LOCK_SWITCH: break; + case HPWMI_LID_SWITCH: + break; + case HPWMI_SCREEN_ROTATION: + break; + case HPWMI_COOLSENSE_SYSTEM_MOBILE: + break; + case HPWMI_COOLSENSE_SYSTEM_HOT: + break; + case HPWMI_PROXIMITY_SENSOR: + break; + case HPWMI_BACKLIT_KB_BRIGHTNESS: + break; + case HPWMI_PEAKSHIFT_PERIOD: + break; + case HPWMI_BATTERY_CHARGE_PERIOD: + break; default: pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); break; diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index e64a7a870d42..a8e43cf70fac 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct device *dev) static int lis3lv02d_resume(struct device *dev) { - return lis3lv02d_poweron(&lis3_dev); + lis3lv02d_poweron(&lis3_dev); + return 0; } static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume); diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 17f00b8dc5cb..89c4519d48ac 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -640,7 +640,8 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) for (bit = 0; bit < 16; bit++) { if (test_bit(bit, &value)) { switch (bit) { - case 6: + case 0: /* Z580 */ + case 
6: /* Z570 */ /* Thermal Management button */ ideapad_input_report(priv, 65); break; @@ -648,6 +649,9 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) /* OneKey Theater button */ ideapad_input_report(priv, 64); break; + default: + pr_info("Unknown special button: %lu\n", bit); + break; } } } diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c new file mode 100644 index 000000000000..47ae0c47d4b5 --- /dev/null +++ b/drivers/platform/x86/pvpanic.c @@ -0,0 +1,124 @@ +/* + * pvpanic.c - pvpanic Device Support + * + * Copyright (C) 2013 Fujitsu. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> + +MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>"); +MODULE_DESCRIPTION("pvpanic device driver"); +MODULE_LICENSE("GPL"); + +static int pvpanic_add(struct acpi_device *device); +static int pvpanic_remove(struct acpi_device *device); + +static const struct acpi_device_id pvpanic_device_ids[] = { + { "QEMU0001", 0 }, + { "", 0 }, +}; +MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids); + +#define PVPANIC_PANICKED (1 << 0) + +static u16 port; + +static struct acpi_driver pvpanic_driver = { + .name = "pvpanic", + .class = "QEMU", + .ids = pvpanic_device_ids, + .ops = { + .add = pvpanic_add, + .remove = pvpanic_remove, + }, + .owner = THIS_MODULE, +}; + +static void +pvpanic_send_event(unsigned int event) +{ + outb(event, port); +} + +static int +pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, + void *unused) +{ + pvpanic_send_event(PVPANIC_PANICKED); + return NOTIFY_DONE; +} + +static struct notifier_block pvpanic_panic_nb = { + .notifier_call = pvpanic_panic_notify, +}; + + +static acpi_status +pvpanic_walk_resources(struct acpi_resource *res, void *context) +{ + switch (res->type) { + case ACPI_RESOURCE_TYPE_END_TAG: + return AE_OK; + + case ACPI_RESOURCE_TYPE_IO: + port = res->data.io.minimum; + return AE_OK; + + default: + return AE_ERROR; + } +} + +static int pvpanic_add(struct acpi_device *device) +{ + acpi_status status; + u64 ret; + + status = acpi_evaluate_integer(device->handle, "_STA", NULL, + &ret); + + if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B) + return -ENODEV; + + acpi_walk_resources(device->handle, METHOD_NAME__CRS, + pvpanic_walk_resources, NULL); + + if (!port) + return -ENODEV; + + atomic_notifier_chain_register(&panic_notifier_list, + &pvpanic_panic_nb); + + return 0; +} + +static int pvpanic_remove(struct acpi_device *device) +{ + + atomic_notifier_chain_unregister(&panic_notifier_list, + &pvpanic_panic_nb); + return 0; +} + +module_acpi_driver(pvpanic_driver); diff --git a/drivers/platform/x86/samsung-q10.c 
b/drivers/platform/x86/samsung-q10.c index 5f770059fd4d..1a90b62a71c6 100644 --- a/drivers/platform/x86/samsung-q10.c +++ b/drivers/platform/x86/samsung-q10.c @@ -176,10 +176,7 @@ static int __init samsungq10_init(void) samsungq10_probe, NULL, 0, NULL, 0); - if (IS_ERR(samsungq10_device)) - return PTR_ERR(samsungq10_device); - - return 0; + return PTR_RET(samsungq10_device); } static void __exit samsungq10_exit(void) diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index d544e3aaf761..2ac045f27f10 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -1255,6 +1255,11 @@ static void sony_nc_notify(struct acpi_device *device, u32 event) real_ev = __sony_nc_gfx_switch_status_get(); break; + case 0x015B: + /* Hybrid GFX switching SVS151290S */ + ev_type = GFX_SWITCH; + real_ev = __sony_nc_gfx_switch_status_get(); + break; default: dprintk("Unknown event 0x%x for handle 0x%x\n", event, handle); @@ -1353,6 +1358,7 @@ static void sony_nc_function_setup(struct acpi_device *device, break; case 0x0128: case 0x0146: + case 0x015B: result = sony_nc_gfx_switch_setup(pf_device, handle); if (result) pr_err("couldn't set up GFX Switch status (%d)\n", @@ -1375,6 +1381,7 @@ static void sony_nc_function_setup(struct acpi_device *device, case 0x0143: case 0x014b: case 0x014c: + case 0x0163: result = sony_nc_kbd_backlight_setup(pf_device, handle); if (result) pr_err("couldn't set up keyboard backlight function (%d)\n", @@ -1426,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) break; case 0x0128: case 0x0146: + case 0x015B: sony_nc_gfx_switch_cleanup(pd); break; case 0x0131: @@ -1439,6 +1447,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) case 0x0143: case 0x014b: case 0x014c: + case 0x0163: sony_nc_kbd_backlight_cleanup(pd); break; default: @@ -1485,6 +1494,7 @@ static void sony_nc_function_resume(void) case 0x0143: case 0x014b: case 0x014c: + case 0x0163: sony_nc_kbd_backlight_resume(); break; default: @@ -2390,7 +2400,9 @@ static int __sony_nc_gfx_switch_status_get(void) { unsigned int result; - if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result)) + if (sony_call_snc_handle(gfxs_ctl->handle, + gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100, + &result)) return -EIO; switch (gfxs_ctl->handle) { @@ -2400,6 +2412,12 @@ static int __sony_nc_gfx_switch_status_get(void) */ return result & 0x1 ? SPEED : STAMINA; break; + case 0x015B: + /* 0: discrete GFX (speed) + * 1: integrated GFX (stamina) + */ + return result & 0x1 ? STAMINA : SPEED; + break; case 0x0128: /* it's a more elaborated bitmask, for now: * 2: integrated GFX (stamina) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index db95c547c09d..86af29f53bbe 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1353,6 +1353,8 @@ config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI select SCSI_FC_ATTRS + select GENERIC_CSUM + select CRC_T10DIF help This lpfc driver supports the Emulex LightPulse Family of Fibre Channel PCI host adapters. 
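The samsung-q10 conversion above relies on the PTR_RET() helper from <linux/err.h>, which collapses the common IS_ERR()/PTR_ERR()/return-0 tail into a single call. A minimal sketch of its behavior, assuming the err.h definition of that era (later kernels rename it PTR_ERR_OR_ZERO()):

	static inline int PTR_RET(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}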
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c index 64136c56e706..33072388ea16 100644 --- a/drivers/scsi/aic94xx/aic94xx_dev.c +++ b/drivers/scsi/aic94xx/aic94xx_dev.c @@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev) struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb = (int) (unsigned long) dev->lldd_dev; - if (dev->dev_type == SATA_PM_PORT) + if (dev->dev_type == SAS_SATA_PM_PORT) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); else if (dev->tproto) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); @@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev) int ddb = (int) (unsigned long) dev->lldd_dev; u32 qdepth = 0; - if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) { + if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) { if (ata_id_has_ncq(ata_dev->id)) qdepth = ata_id_queue_depth(ata_dev->id); asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, @@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev) int ddb = (int) (unsigned long) dev->lldd_dev; asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); - if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || - dev->dev_type == SATA_PM_PORT) { + if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM || + dev->dev_type == SAS_SATA_PM_PORT) { struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status); @@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask); if (dev->port->oob_mode != SATA_OOB_MODE) { flags |= OPEN_REQUIRED; - if ((dev->dev_type == SATA_DEV) || + if ((dev->dev_type == SAS_SATA_DEV) || (dev->tproto & SAS_PROTOCOL_STP)) { struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; if (rps_resp->frame_type == SMP_RESPONSE && @@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev) } else { flags |= CONCURRENT_CONN_SUPP; if (!dev->parent && - (dev->dev_type == EDGE_DEV || - dev->dev_type == FANOUT_DEV)) + (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)) asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, 4); else @@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1); } } - if (dev->dev_type == SATA_PM) + if (dev->dev_type == SAS_SATA_PM) flags |= SATA_MULTIPORT; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); @@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { i = asd_init_sata(dev); if (i < 0) { asd_free_ddb(asd_ha, ddb); @@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev) } } - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { struct sas_end_device *rdev = rphy_to_end_device(dev->rphy); if (rdev->I_T_nexus_loss_timeout > 0) asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, @@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev) spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); switch (dev->dev_type) { - case SATA_PM: + case SAS_SATA_PM: res = asd_init_sata_pm_ddb(dev); break; - case 
SATA_PM_PORT: + case SAS_SATA_PM_PORT: res = asd_init_sata_pm_port_ddb(dev); break; default: diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c index 81b736c76fff..4df867e07b20 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy) memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); - phy->identify_frame->dev_type = SAS_END_DEV; + phy->identify_frame->dev_type = SAS_END_DEVICE; if (phy->sas_phy.role & PHY_ROLE_INITIATOR) phy->identify_frame->initiator_bits = phy->sas_phy.iproto; if (phy->sas_phy.role & PHY_ROLE_TARGET) diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index cf9040933da6..d4c35df3d4ae 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev) struct sas_phy *phy = sas_get_local_phy(dev); /* Standard mandates link reset for ATA (type 0) and * hard reset for SSP (type 1) */ - int reset_type = (dev->dev_type == SATA_DEV || + int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index f1733dfa3ae2..777e7c0bbb4b 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 5c87768c109c..e66aa7c11a8a 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
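The SATA_*/EDGE_DEV/FANOUT_DEV conversions in the aic94xx hunks above follow a tree-wide rename of the libsas device-type enumerators. A sketch of the mapping, assuming the include/scsi/sas.h layout of the time (the numeric values mirror the SAS "attached device type" field; verify against the header before relying on them):

	enum sas_device_type {
		SAS_PHY_UNUSED             = 0,
		SAS_END_DEVICE             = 1,	/* was SAS_END_DEV */
		SAS_EDGE_EXPANDER_DEVICE   = 2,	/* was EDGE_DEV */
		SAS_FANOUT_EXPANDER_DEVICE = 3,	/* was FANOUT_DEV */
		SAS_HA                     = 4,
		SAS_SATA_DEV               = 5,	/* was SATA_DEV */
		SAS_SATA_PM                = 7,	/* was SATA_PM */
		SAS_SATA_PM_PORT           = 8,	/* was SATA_PM_PORT */
	};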
* * This program is free software; you can redistribute it and/or @@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint16_t status = 0, addl_status = 0, wrb_num = 0; struct be_mcc_wrb *temp_wrb; struct be_cmd_req_hdr *ioctl_hdr; + struct be_cmd_resp_hdr *ioctl_resp_hdr; struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; if (beiscsi_error(phba)) @@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, ioctl_hdr->subsystem, ioctl_hdr->opcode, status, addl_status); + + if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { + ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr; + if (ioctl_resp_hdr->response_length) + goto release_mcc_tag; + } rc = -EAGAIN; } @@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); struct be_cmd_req_hdr *hdr = embedded_payload(wrb); + struct be_cmd_resp_hdr *resp_hdr; be_dws_le_to_cpu(compl, 4); @@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, hdr->subsystem, hdr->opcode, compl_status, extd_status); + if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) { + resp_hdr = (struct be_cmd_resp_hdr *) hdr; + if (resp_hdr->response_length) + return 0; + } return -EBUSY; } return 0; @@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) void beiscsi_async_link_state_process(struct beiscsi_hba *phba, struct be_async_event_link_state *evt) { - switch (evt->port_link_status) { - case ASYNC_EVENT_LINK_DOWN: + if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) || + ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && + (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) { + phba->state = BE_ADAPTER_LINK_DOWN; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Link Down on Physical Port %d\n", + "BC_%d : Link Down on Port %d\n", evt->physical_port); - phba->state |= BE_ADAPTER_LINK_DOWN; iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session); - break; - case ASYNC_EVENT_LINK_UP: + } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) || + ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && + (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { phba->state = BE_ADAPTER_UP; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Link UP on Physical Port %d\n", - evt->physical_port); - break; - default: - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Unexpected Async Notification %d on" - "Physical Port %d\n", - evt->port_link_status, + "BC_%d : Link UP on Port %d\n", evt->physical_port); } } @@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) { void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); - int wait = 0; + uint32_t wait = 0; u32 ready; do { @@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl) struct be_mcc_compl *compl = &mbox->compl; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + status = be_mbox_db_ready_wait(ctrl); + if (status) + return status; + val &= ~MPU_MAILBOX_DB_RDY_MASK; val |= MPU_MAILBOX_DB_HI_MASK; val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; @@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba) struct be_mcc_compl *compl = &mbox->compl; struct be_ctrl_info *ctrl = &phba->ctrl; + status = be_mbox_db_ready_wait(ctrl); + if (status) + return status; + val |= MPU_MAILBOX_DB_HI_MASK; /* at bits 2 - 31 
place mbox dma addr msb bits 34 - 63 */ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; @@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, return status; } +/** + * be_cmd_fw_initialize()- Initialize FW + * @ctrl: Pointer to function control structure + * + * Send FW initialize pattern for the function. + * + * return + * Success: 0 + * Failure: Non-Zero value + **/ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); @@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) return status; } +/** + * be_cmd_fw_uninit()- Uninitialize FW + * @ctrl: Pointer to function control structure + * + * Send FW uninitialize pattern for the function. + * + * return + * Success: 0 + * Failure: Non-Zero value + **/ +int be_cmd_fw_uninit(struct be_ctrl_info *ctrl) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + int status; + u8 *endian_check; + + spin_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + + endian_check = (u8 *) wrb; + *endian_check++ = 0xFF; + *endian_check++ = 0xAA; + *endian_check++ = 0xBB; + *endian_check++ = 0xFF; + *endian_check++ = 0xFF; + *endian_check++ = 0xCC; + *endian_check++ = 0xDD; + *endian_check = 0xFF; + + be_dws_cpu_to_le(wrb, sizeof(*wrb)); + + status = be_mbox_notify(ctrl); + if (status) + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BC_%d : be_cmd_fw_uninit Failed\n"); + + spin_unlock(&ctrl->mbox_lock); + return status; +} + int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, struct be_queue_info *cq, struct be_queue_info *eq, bool sol_evts, bool no_delay, int coalesce_wm) @@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, OPCODE_COMMON_CQ_CREATE, sizeof(*req)); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); - if (chip_skh_r(ctrl->pdev)) { - req->hdr.version = MBX_CMD_VER2; - req->page_size = 1; - AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, - ctxt, coalesce_wm); - AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, - ctxt, no_delay); - AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, - __ilog2_u32(cq->len / 256)); - AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); - AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); - AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); - AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); - } else { + if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); @@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context, func, ctxt, PCI_FUNC(ctrl->pdev->devfn)); + } else { + req->hdr.version = MBX_CMD_VER2; + req->page_size = 1; + AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, + ctxt, coalesce_wm); + AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, + ctxt, no_delay); + AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, + __ilog2_u32(cq->len / 256)); + AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); + AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); } be_dws_cpu_to_le(ctxt, sizeof(req->context)); @@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, struct be_mcc_wrb *wrb = 
wrb_from_mbox(&ctrl->mbox_mem); struct be_defq_create_req *req = embedded_payload(wrb); struct be_dma_mem *q_mem = &dq->dma_mem; + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); void *ctxt = &req->context; int status; @@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); - AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0); - AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt, - 1); - AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt, - PCI_FUNC(ctrl->pdev->devfn)); - AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt, - be_encoded_q_len(length / sizeof(struct phys_addr))); - AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size, - ctxt, entry_size); - AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt, - cq->id); + + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_be_default_pdu_context, + rx_pdid, ctxt, 0); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + rx_pdid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn)); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + ring_size, ctxt, + be_encoded_q_len(length / + sizeof(struct phys_addr))); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + default_buffer_size, ctxt, entry_size); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + cq_id_recv, ctxt, cq->id); + } else { + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + rx_pdid, ctxt, 0); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + rx_pdid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + ring_size, ctxt, + be_encoded_q_len(length / + sizeof(struct phys_addr))); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + default_buffer_size, ctxt, entry_size); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + cq_id_recv, ctxt, cq->id); + } be_dws_cpu_to_le(ctxt, sizeof(req->context)); diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 23397d51ac54..99073086dfe0 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
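For contrast with the new be_cmd_fw_uninit() above: the pre-existing be_cmd_fw_initialize() posts its own "endian check" signature through the same embedded-mailbox path, differing only in the inner payload bytes. A sketch of that pattern as it appears in the in-tree driver (verify against be_cmds.c before relying on the exact values):

	/* be_cmd_fw_initialize() signature bytes; be_cmd_fw_uninit()
	 * substitutes 0xAA/0xBB and 0xCC/0xDD for the inner pairs. */
	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check = 0xFF;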
* * This program is free software; you can redistribute it and/or @@ -52,6 +52,10 @@ struct be_mcc_wrb { /* Completion Status */ #define MCC_STATUS_SUCCESS 0x0 +#define MCC_STATUS_FAILED 0x1 +#define MCC_STATUS_ILLEGAL_REQUEST 0x2 +#define MCC_STATUS_ILLEGAL_FIELD 0x3 +#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4 #define CQE_STATUS_COMPL_MASK 0xFFFF #define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ @@ -118,7 +122,8 @@ struct be_async_event_trailer { enum { ASYNC_EVENT_LINK_DOWN = 0x0, - ASYNC_EVENT_LINK_UP = 0x1 + ASYNC_EVENT_LINK_UP = 0x1, + ASYNC_EVENT_LOGICAL = 0x2 }; /** @@ -130,6 +135,9 @@ struct be_async_event_link_state { u8 port_link_status; u8 port_duplex; u8 port_speed; +#define BEISCSI_PHY_LINK_FAULT_NONE 0x00 +#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01 +#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02 u8 port_fault; u8 rsvd0[7]; struct be_async_event_trailer trailer; @@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va); /* iSCSI Functions */ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); +int be_cmd_fw_uninit(struct be_ctrl_info *ctrl); struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); @@ -751,6 +760,18 @@ struct amap_be_default_pdu_context { u8 rsvd4[32]; /* dword 3 */ } __packed; +struct amap_default_pdu_context_ext { + u8 rsvd0[16]; /* dword 0 */ + u8 ring_size[4]; /* dword 0 */ + u8 rsvd1[12]; /* dword 0 */ + u8 rsvd2[22]; /* dword 1 */ + u8 rx_pdid[9]; /* dword 1 */ + u8 rx_pdid_valid; /* dword 1 */ + u8 default_buffer_size[16]; /* dword 2 */ + u8 cq_id_recv[16]; /* dword 2 */ + u8 rsvd3[32]; /* dword 3 */ +} __packed; + struct be_defq_create_req { struct be_cmd_req_hdr hdr; u16 num_pages; @@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 { * stack to notify the * controller of a posted Work Request Block */ -#define DB_WRB_POST_CID_MASK 0x3FF /* bits 0 - 9 */ +#define DB_WRB_POST_CID_MASK 0xFFFF /* bits 0 - 15 */ #define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 7 */ #define DB_DEF_PDU_WRB_INDEX_SHIFT 16 diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 9014690fe841..ef36be003f67 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba, struct beiscsi_conn *beiscsi_conn, unsigned int cid) { - if (phba->conn_table[cid]) { + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + if (phba->conn_table[cri_index]) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Connection table already occupied. 
Detected clash\n"); @@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba, } else { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n", - cid, beiscsi_conn); + cri_index, beiscsi_conn); - phba->conn_table[cid] = beiscsi_conn; + phba->conn_table[cri_index] = beiscsi_conn; } return 0; } @@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) { struct beiscsi_hba *phba = beiscsi_ep->phba; + struct beiscsi_conn *beiscsi_conn; beiscsi_put_cid(phba, beiscsi_ep->ep_cid); beiscsi_ep->phba = NULL; + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = NULL; + + /** + * Check if any connection resource allocated by driver + * is to be freed. This case occurs when target redirection + * or connection retry is done. + **/ + if (!beiscsi_ep->conn) + return; + + beiscsi_conn = beiscsi_ep->conn; + if (beiscsi_conn->login_in_progress) { + beiscsi_free_mgmt_task_handles(beiscsi_conn, + beiscsi_conn->task); + beiscsi_conn->login_in_progress = 0; + } } /** @@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, { struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; struct beiscsi_hba *phba = beiscsi_ep->phba; - struct be_mcc_wrb *wrb; struct tcp_connect_and_offload_out *ptcpcnct_out; struct be_dma_mem nonemb_cmd; unsigned int tag; @@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : In beiscsi_open_conn, ep_cid=%d\n", beiscsi_ep->ep_cid); - phba->ep_array[beiscsi_ep->ep_cid - - phba->fw_config.iscsi_cid_start] = ep; - if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + - phba->params.cxns_per_ctrl * 2)) { - - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BS_%d : Failed in allocate iscsi cid\n"); - goto free_ep; - } + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = ep; beiscsi_ep->cid_vld = 0; nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, @@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : Failed to allocate memory for" " mgmt_open_connection\n"); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); + beiscsi_free_ep(beiscsi_ep); return -ENOMEM; } nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); memset(nonemb_cmd.va, 0, nonemb_cmd.size); tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); - if (!tag) { + if (tag <= 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : mgmt_open_connection Failed for cid=%d\n", beiscsi_ep->ep_cid); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); + beiscsi_free_ep(beiscsi_ep); return -EAGAIN; } - ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, @@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, 
nonemb_cmd.dma); return 0; - -free_ep: - beiscsi_free_ep(beiscsi_ep); - return -EBUSY; } /** @@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, return ERR_PTR(ret); } + if (beiscsi_error(phba)) { + ret = -EIO; + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BS_%d : The FW state Not Stable!!!\n"); + return ERR_PTR(ret); + } + if (phba->state != BE_ADAPTER_UP) { ret = -EBUSY; beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, @@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag) static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba, unsigned int cid) { - if (phba->conn_table[cid]) - phba->conn_table[cid] = NULL; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + if (phba->conn_table[cri_index]) + phba->conn_table[cri_index] = NULL; else { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : Connection table Not occupied.\n"); diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index 38eab7232159..31ddc8494398 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 4e2733d23003..d24a2867bc21 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00, DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); +DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); +DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL); struct device_attribute *beiscsi_attrs[] = { &dev_attr_beiscsi_log_enable, &dev_attr_beiscsi_drvr_ver, &dev_attr_beiscsi_adapter_family, + &dev_attr_beiscsi_fw_ver, + &dev_attr_beiscsi_active_cid_count, NULL, }; @@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba) + BE2_TMFS + BE2_NOPOUT_REQ)); phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; - phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2; + phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count; phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; phba->params.num_sge_per_io = BE2_SGE; phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; @@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba, static unsigned int beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, - unsigned short cid, struct pdu_base *ppdu, unsigned long pdu_len, void *pbuffer, unsigned long buf_len) @@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[cid]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (pwrb_context->wrb_handles_available >= 2) { pwrb_handle = pwrb_context->pwrb_handle_base[ pwrb_context->alloc_index]; @@ 
-1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, hdr->t2retain = 0; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; - hdr->exp_cmdsn = csol_cqe->exp_cmdsn; - hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->dlength[0] = 0; hdr->dlength[1] = 0; @@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; - hdr->exp_cmdsn = csol_cqe->exp_cmdsn; - hdr->max_cmdsn = (csol_cqe->exp_cmdsn + - csol_cqe->cmd_wnd - 1); + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); @@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, struct hwi_controller *phwi_ctrlr; struct iscsi_task *task; struct beiscsi_io_task *io_task; - struct iscsi_conn *conn = beiscsi_conn->conn; - struct iscsi_session *session = conn->session; - uint16_t wrb_index, cid; + uint16_t wrb_index, cid, cri_index; phwi_ctrlr = phba->phwi_ctrlr; - if (chip_skh_r(phba->pcidev)) { - wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + if (is_chip_be2_be3r(phba)) { + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, wrb_idx, psol); - cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, cid, psol); } else { - wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, wrb_idx, psol); - cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, cid, psol); } - pwrb_context = &phwi_ctrlr->wrb_context[ - cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; task = pwrb_handle->pio_handle; io_task = task->dd_data; - spin_lock_bh(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, io_task->psgl_handle); - spin_unlock_bh(&phba->mgmt_sgl_lock); - spin_lock_bh(&session->lock); - free_wrb_handle(phba, pwrb_context, pwrb_handle); - spin_unlock_bh(&session->lock); + memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb)); + iscsi_put_task(task); } static void @@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, hdr = (struct iscsi_nopin *)task->hdr; hdr->flags = csol_cqe->i_flags; hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); - hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn + - csol_cqe->cmd_wnd - 1); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->opcode = ISCSI_OP_NOOP_IN; hdr->itt = io_task->libiscsi_itt; @@ -1418,7 +1417,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, struct sol_cqe *psol, struct common_sol_cqe *csol_cqe) { - if (chip_skh_r(phba->pcidev)) { + if (is_chip_be2_be3r(phba)) { + csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, + i_exp_cmd_sn, psol); + csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, + i_res_cnt, psol); + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + i_cmd_wnd, psol); + csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, + wrb_index, psol); + csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, + cid, psol); + csol_cqe->hw_sts = 
AMAP_GET_BITS(struct amap_sol_cqe, + hw_sts, psol); + csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, + i_resp, psol); + csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, + i_sts, psol); + csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, + i_flags, psol); + } else { csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_exp_cmd_sn, psol); csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, @@ -1429,7 +1447,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, cid, psol); csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, hw_sts, psol); - csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_cmd_wnd, psol); if (AMAP_GET_BITS(struct amap_sol_cqe_v2, cmd_cmpl, psol)) @@ -1445,25 +1463,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, if (AMAP_GET_BITS(struct amap_sol_cqe_v2, o, psol)) csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; - } else { - csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, - i_exp_cmd_sn, psol); - csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, - i_res_cnt, psol); - csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, - i_cmd_wnd, psol); - csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, - wrb_index, psol); - csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, - cid, psol); - csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, - hw_sts, psol); - csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, - i_resp, psol); - csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, - i_sts, psol); - csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, - i_flags, psol); } } @@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; struct common_sol_cqe csol_cqe = {0}; + uint16_t cri_index = 0; phwi_ctrlr = phba->phwi_ctrlr; /* Copy the elements to a common structure */ adapter_get_sol_cqe(phba, psol, &csol_cqe); - pwrb_context = &phwi_ctrlr->wrb_context[ - csol_cqe.cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[ csol_cqe.wrb_index]; @@ -1561,15 +1561,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba, unsigned char is_header = 0; unsigned int index, dpl; - if (chip_skh_r(phba->pcidev)) { - dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + if (is_chip_be2_be3r(phba)) { + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, dpl, pdpdu_cqe); - index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, index, pdpdu_cqe); } else { - dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, dpl, pdpdu_cqe); - index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, index, pdpdu_cqe); } @@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba, WARN_ON(!pasync_handle); - pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start; + pasync_handle->cri = + BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); pasync_handle->is_header = is_header; pasync_handle->buffer_len = dpl; *pcq_index = index; @@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, } status = beiscsi_process_async_pdu(beiscsi_conn, phba, - (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start), phdr, hdr_len, 
pfirst_buffer, offset); @@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) unsigned int num_processed = 0; unsigned int tot_nump = 0; unsigned short code = 0, cid = 0; + uint16_t cri_index = 0; struct beiscsi_conn *beiscsi_conn; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; @@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 32] & CQE_CODE_MASK); /* Get the CID */ - if (chip_skh_r(phba->pcidev)) { + if (is_chip_be2_be3r(phba)) { + cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } else { if ((code == DRIVERMSG_NOTIFY) || (code == UNSOL_HDR_NOTIFY) || (code == UNSOL_DATA_NOTIFY)) @@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) else cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, cid, sol); - } else - cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } - ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(cid); + ep = phba->ep_array[cri_index]; beiscsi_ep = ep->dd_data; beiscsi_conn = beiscsi_ep->conn; @@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work) static int be_iopoll(struct blk_iopoll *iop, int budget) { - static unsigned int ret; + unsigned int ret; struct beiscsi_hba *phba; struct be_eq_obj *pbe_eq; @@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) /* Check for the data_count */ dsp_value = (task->data_count) ? 1 : 0; - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, dsp_value); else - AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, dsp_value); /* Map addr only if there is data_count */ @@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) static int beiscsi_alloc_mem(struct beiscsi_hba *phba) { - struct be_mem_descriptor *mem_descr; dma_addr_t bus_add; + struct hwi_controller *phwi_ctrlr; + struct be_mem_descriptor *mem_descr; struct mem_array *mem_arr, *mem_arr_orig; unsigned int i, j, alloc_size, curr_alloc_size; @@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) if (!phba->phwi_ctrlr) return -ENOMEM; + /* Allocate memory for wrb_context */ + phwi_ctrlr = phba->phwi_ctrlr; + phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * + phba->params.cxns_per_ctrl, + GFP_KERNEL); + if (!phwi_ctrlr->wrb_context) + return -ENOMEM; + phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), GFP_KERNEL); if (!phba->init_mem) { + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) GFP_KERNEL); if (!mem_arr_orig) { kfree(phba->init_mem); + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2628,6 +2640,7 @@ free_mem: } kfree(mem_arr_orig); kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba) static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; + struct hwi_context_memory *phwi_ctxt; struct wrb_handle *pwrb_handle = NULL; struct hwi_controller *phwi_ctrlr; struct hwi_wrb_context *pwrb_context; @@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) mem_descr_wrb += 
HWI_MEM_WRB; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + /* Allocate memory for WRBQ */ + phwi_ctxt = phwi_ctrlr->phwi_ctxt; + phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * + phba->fw_config.iscsi_cid_count, + GFP_KERNEL); + if (!phwi_ctxt->be_wrbq) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : WRBQ Mem Alloc Failed\n"); + return -ENOMEM; + } + + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; pwrb_context->pwrb_handle_base = kzalloc(sizeof(struct wrb_handle *) * @@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) } } idx = 0; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; if (!num_cxn_wrb) { pwrb = mem_descr_wrb->mem_array[idx].virtual_address; @@ -2752,7 +2777,7 @@ init_wrb_hndl_failed: return -ENOMEM; } -static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) +static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) { struct hwi_controller *phwi_ctrlr; struct hba_parameters *p = &phba->params; @@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; memset(pasync_ctx, 0, sizeof(*pasync_ctx)); + pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) * + phba->fw_config.iscsi_cid_count, + GFP_KERNEL); + if (!pasync_ctx->async_entry) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n"); + return -ENOMEM; + } + pasync_ctx->num_entries = p->asyncpdus_per_ctrl; pasync_ctx->buffer_size = p->defpdu_hdr_sz; @@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) pasync_ctx->async_header.ep_read_ptr = -1; pasync_ctx->async_data.host_write_ptr = 0; pasync_ctx->async_data.ep_read_ptr = -1; + + return 0; } static int @@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, void *wrb_vaddr; struct be_dma_mem sgl; struct be_mem_descriptor *mem_descr; + struct hwi_wrb_context *pwrb_context; int status; idx = 0; @@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, kfree(pwrb_arr); return status; } - phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. 
- id; + pwrb_context = &phwi_ctrlr->wrb_context[i]; + pwrb_context->cid = phwi_context->be_wrbq[i].id; + BE_SET_CID_TO_CRI(i, pwrb_context->cid); } kfree(pwrb_arr); return 0; @@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba) struct hwi_wrb_context *pwrb_context; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; kfree(pwrb_context->pwrb_handle_base); kfree(pwrb_context->pwrb_handle_basestd); @@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba) struct be_ctrl_info *ctrl = &phba->ctrl; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; + struct hwi_async_pdu_context *pasync_ctx; int i, eq_num; phwi_ctrlr = phba->phwi_ctrlr; @@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba) if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); } + kfree(phwi_context->be_wrbq); free_wrb_handles(phba); q = &phwi_context->be_def_hdrq; @@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); } be_mcc_queues_destroy(phba); + + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; + kfree(pasync_ctx->async_entry); + be_cmd_fw_uninit(ctrl); } static int be_mcc_queues_create(struct beiscsi_hba *phba, @@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba) if (beiscsi_init_wrb_handle(phba)) return -ENOMEM; - hwi_init_async_pdu_ctx(phba); + if (hwi_init_async_pdu_ctx(phba)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx failed\n"); + return -ENOMEM; + } + if (hwi_init_port(phba) != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : hwi_init_controller failed\n"); @@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba) mem_descr++; } kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); } @@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) static int hba_setup_cid_tbls(struct beiscsi_hba *phba) { - int i, new_cid; + int i; phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, GFP_KERNEL); @@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) return -ENOMEM; } phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * - phba->params.cxns_per_ctrl * 2, GFP_KERNEL); + phba->params.cxns_per_ctrl, GFP_KERNEL); if (!phba->ep_array) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Failed to allocate memory in " "hba_setup_cid_tbls\n"); kfree(phba->cid_array); + phba->cid_array = NULL; return -ENOMEM; } - new_cid = phba->fw_config.iscsi_cid_start; - for (i = 0; i < phba->params.cxns_per_ctrl; i++) { - phba->cid_array[i] = new_cid; - new_cid += 2; + + phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * + phba->params.cxns_per_ctrl, GFP_KERNEL); + if (!phba->conn_table) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory in" + "hba_setup_cid_tbls\n"); + + kfree(phba->cid_array); + kfree(phba->ep_array); + phba->cid_array = NULL; + phba->ep_array = NULL; + return -ENOMEM; } + + for (i = 0; i < phba->params.cxns_per_ctrl; i++) + phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid; + phba->avlbl_cids = phba->params.cxns_per_ctrl; return 0; } @@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba) kfree(phba->eh_sgl_hndl_base); 
kfree(phba->cid_array); kfree(phba->ep_array); + kfree(phba->conn_table); +} + +/** + * beiscsi_free_mgmt_task_handles()- Free driver CXN resources + * @beiscsi_conn: ptr to the conn to be cleaned up + * @task: ptr to iscsi_task resource to be freed. + * + * Free driver mgmt resources bound to the CXN. + **/ +void +beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task) +{ + struct beiscsi_io_task *io_task; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + + io_task = task->dd_data; + + if (io_task->pwrb_handle) { + memset(io_task->pwrb_handle->pwrb, 0, + sizeof(struct iscsi_wrb)); + free_wrb_handle(phba, pwrb_context, + io_task->pwrb_handle); + io_task->pwrb_handle = NULL; + } + + if (io_task->psgl_handle) { + spin_lock_bh(&phba->mgmt_sgl_lock); + free_mgmt_sgl_handle(phba, + io_task->psgl_handle); + io_task->psgl_handle = NULL; + spin_unlock_bh(&phba->mgmt_sgl_lock); + } + + if (io_task->mtask_addr) + pci_unmap_single(phba->pcidev, + io_task->mtask_addr, + io_task->mtask_data_count, + PCI_DMA_TODEVICE); } /** @@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->cmd_bhs) { pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, @@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) io_task->psgl_handle = NULL; } } else { - if (!beiscsi_conn->login_in_progress) { - if (io_task->pwrb_handle) { - free_wrb_handle(phba, pwrb_context, - io_task->pwrb_handle); - io_task->pwrb_handle = NULL; - } - if (io_task->psgl_handle) { - spin_lock(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, - io_task->psgl_handle); - spin_unlock(&phba->mgmt_sgl_lock); - io_task->psgl_handle = NULL; - } - if (io_task->mtask_addr) { - pci_unmap_single(phba->pcidev, - io_task->mtask_addr, - io_task->mtask_data_count, - PCI_DMA_TODEVICE); - io_task->mtask_addr = 0; - } - } + if (!beiscsi_conn->login_in_progress) + beiscsi_free_mgmt_task_handles(beiscsi_conn, task); } } @@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, beiscsi_cleanup_task(task); spin_unlock_bh(&session->lock); - pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start)); + pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid); /* Check for the adapter family */ - if (chip_skh_r(phba->pcidev)) - beiscsi_offload_cxn_v2(params, pwrb_handle); - else + if (is_chip_be2_be3r(phba)) beiscsi_offload_cxn_v0(params, pwrb_handle, phba->init_mem); + else + beiscsi_offload_cxn_v2(params, pwrb_handle); be_dws_le_to_cpu(pwrb_handle->pwrb, sizeof(struct iscsi_target_context_update_wrb)); @@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; itt_t itt; + uint16_t cri_index = 0; struct beiscsi_session *beiscsi_sess = 
beiscsi_conn->beiscsi_sess; dma_addr_t paddr; @@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) goto free_hndls; } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) } else { io_task->scsi_cmnd = NULL; if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { + beiscsi_conn->task = task; if (!beiscsi_conn->login_in_progress) { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = (struct sgl_handle *) @@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->psgl_handle; io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | @@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->pwrb_handle = beiscsi_conn->plogin_wrb_handle; } - beiscsi_conn->task = task; } else { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); @@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4324,12 +4414,13 @@ free_io_hndls: free_mgmt_hndls: spin_lock(&phba->mgmt_sgl_lock); free_mgmt_sgl_handle(phba, io_task->psgl_handle); + io_task->psgl_handle = NULL; spin_unlock(&phba->mgmt_sgl_lock); free_hndls: phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[ - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->pwrb_handle) free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); io_task->pwrb_handle = NULL; @@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, unsigned int doorbell = 0; pwrb = io_task->pwrb_handle->pwrb; - memset(pwrb, 0, sizeof(*pwrb)); io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; io_task->bhs_len = sizeof(struct be_cmd_bhs); @@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task) pwrb = io_task->pwrb_handle->pwrb; memset(pwrb, 0, sizeof(*pwrb)); - if (chip_skh_r(phba->pcidev)) { - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, - be32_to_cpu(task->cmdsn)); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, - io_task->pwrb_handle->wrb_index); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, - io_task->psgl_handle->sgl_index); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, - task->data_count); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, - io_task->pwrb_handle->nxt_wrb_index); - pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; - } else { + if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, be32_to_cpu(task->cmdsn)); AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, @@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task) AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 
io_task->pwrb_handle->nxt_wrb_index); pwrb_typeoffset = BE_WRB_TYPE_OFFSET; + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, + io_task->psgl_handle->sgl_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, + task->data_count); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, + io_task->pwrb_handle->nxt_wrb_index); + pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; } @@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task) case ISCSI_OP_NOOP_OUT: if (task->hdr->ttt != ISCSI_RESERVED_TAG) { ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); else - AMAP_SET_BITS(struct amap_iscsi_wrb, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 1); } else { ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); else - AMAP_SET_BITS(struct amap_iscsi_wrb, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 0); } hwi_write_buffer(pwrb, task); @@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task) } /* Set the task type */ - io_task->wrb_type = (chip_skh_r(phba->pcidev)) ? - AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) : - AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb); + io_task->wrb_type = (is_chip_be2_be3r(phba)) ? + AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : + AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); doorbell |= cid & DB_WRB_POST_CID_MASK; doorbell |= (io_task->pwrb_handle->wrb_index & @@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, case OC_SKH_ID1: phba->generation = BE_GEN4; phba->iotask_fn = beiscsi_iotask_v2; + break; default: phba->generation = 0; } diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 5946577d79d6..2c06ef3c02ac 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -36,7 +36,7 @@ #include "be.h" #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.0.272.0" +#define BUILD_STR "10.0.467.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -66,8 +66,9 @@ #define MAX_CPUS 64 #define BEISCSI_MAX_NUM_CPUS 7 -#define OC_SKH_MAX_NUM_CPUS 63 +#define OC_SKH_MAX_NUM_CPUS 31 +#define BEISCSI_VER_STRLEN 32 #define BEISCSI_SGLIST_ELEMENTS 30 @@ -265,7 +266,9 @@ struct invalidate_command_table { unsigned short cid; } __packed; -#define chip_skh_r(pdev) (pdev->device == OC_SKH_ID1) +#define chip_be2(phba) (phba->generation == BE_GEN2) +#define chip_be3_r(phba) (phba->generation == BE_GEN3) +#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba))) struct beiscsi_hba { struct hba_parameters params; struct hwi_controller *phwi_ctrlr; @@ -304,10 +307,15 @@ struct beiscsi_hba { unsigned short avlbl_cids; unsigned short cid_alloc; unsigned short cid_free; - struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2]; struct list_head hba_queue; +#define BE_MAX_SESSION 2048 +#define BE_SET_CID_TO_CRI(cri_index, cid) \ + (phba->cid_to_cri_map[cid] = cri_index) +#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid]) + unsigned short cid_to_cri_map[BE_MAX_SESSION]; unsigned short *cid_array; struct iscsi_endpoint **ep_array; + struct beiscsi_conn **conn_table; struct iscsi_boot_kset *boot_kset; struct Scsi_Host *shost; struct iscsi_iface *ipv4_iface; @@ -339,6 +347,7 @@ struct beiscsi_hba { struct delayed_work beiscsi_hw_check_task; u8 mac_address[ETH_ALEN]; + char fw_ver_str[BEISCSI_VER_STRLEN]; char wq_name[20]; struct workqueue_struct *wq; /* The actual work queue */ struct be_ctrl_info ctrl; @@ -563,7 +572,7 @@ struct hwi_async_pdu_context { * This is a varying size list! Do not add anything * after this entry!! */ - struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2]; + struct hwi_async_entry *async_entry; }; #define PDUCQE_CODE_MASK 0x0000003F @@ -749,6 +758,8 @@ void free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); void beiscsi_process_all_cqs(struct work_struct *work); +void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task); static inline bool beiscsi_error(struct beiscsi_hba *phba) { @@ -933,7 +944,7 @@ struct hwi_controller { struct sgl_handle *psgl_handle_base; unsigned int wrb_mem_index; - struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2]; + struct hwi_wrb_context *wrb_context; struct mcc_wrb *pmcc_wrb_base; struct be_ring default_pdu_hdr; struct be_ring default_pdu_data; @@ -970,9 +981,7 @@ struct hwi_context_memory { struct be_queue_info be_def_hdrq; struct be_queue_info be_def_dataq; - struct be_queue_info be_wrbq[BE2_MAX_SESSIONS]; - struct be_mcc_wrb_context *pbe_mcc_context; - + struct be_queue_info *be_wrbq; struct hwi_async_pdu_context *pasync_ctx; }; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 55cc9902263d..245a9595a93a 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
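The BE_SET_CID_TO_CRI()/BE_GET_CRI_FROM_CID() macros added to be_main.h above replace the old "cid - iscsi_cid_start" arithmetic with an explicit lookup table. Condensed from the beiscsi_create_wrb_rings() and completion-path hunks elsewhere in this patch, the intended usage is:

	/* Build the map once, as each WRB queue is created:
	 * wrb_context[] is indexed by CRI, the adapter reports CIDs. */
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		pwrb_context = &phwi_ctrlr->wrb_context[i];
		pwrb_context->cid = phwi_context->be_wrbq[i].id;
		BE_SET_CID_TO_CRI(i, pwrb_context->cid);
	}

	/* Translate on every completion before touching
	 * per-connection state. */
	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];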
* * This program is free software; you can redistribute it and/or @@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl, beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : phba->fw_config.iscsi_features = %d\n", phba->fw_config.iscsi_features); + memcpy(phba->fw_ver_str, resp->params.hba_attribs. + firmware_version_string, BEISCSI_VER_STRLEN); } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in mgmt_check_supported_fw\n"); @@ -1260,6 +1262,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, } /** + * beiscsi_fw_ver_disp()- Display Firmware Version + * @dev: ptr to device, not used. + * @attr: device attribute, not used. + * @buf: contains the formatted text firmware version + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str); +} + +/** + * beiscsi_active_cid_disp()- Display Active Session Count + * @dev: ptr to device, not used. + * @attr: device attribute, not used. + * @buf: contains the formatted text active session count + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", + (phba->params.cxns_per_ctrl - phba->avlbl_cids)); +} + +/** * beiscsi_adap_family_disp()- Display adapter family. * @dev: ptr to device to get priv structure * @attr: device attribute, not used. diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 2e4968add799..04af7e74fe48 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved.
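beiscsi_fw_ver_disp() and beiscsi_active_cid_disp() above are plain sysfs show routines; to become visible under /sys they still need to be declared as device attributes and listed in the host template. A sketch of the usual wiring (the attribute names and the list below are assumptions; only the show functions appear in this diff):

static DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
static DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
                   beiscsi_active_cid_disp, NULL);

/* hypothetical attribute list, hooked up through the SCSI host
 * template's .shost_attrs member */
struct device_attribute *beiscsi_attrs[] = {
        &dev_attr_beiscsi_fw_ver,
        &dev_attr_beiscsi_active_session_count,
        NULL,
};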
* * This program is free software; you can redistribute it and/or @@ -156,25 +156,25 @@ union invalidate_commands_params { } __packed; struct mgmt_hba_attributes { - u8 flashrom_version_string[32]; - u8 manufacturer_name[32]; + u8 flashrom_version_string[BEISCSI_VER_STRLEN]; + u8 manufacturer_name[BEISCSI_VER_STRLEN]; u32 supported_modes; u8 seeprom_version_lo; u8 seeprom_version_hi; u8 rsvd0[2]; u32 fw_cmd_data_struct_version; u32 ep_fw_data_struct_version; - u32 future_reserved[12]; + u8 ncsi_version_string[12]; u32 default_extended_timeout; - u8 controller_model_number[32]; + u8 controller_model_number[BEISCSI_VER_STRLEN]; u8 controller_description[64]; - u8 controller_serial_number[32]; - u8 ip_version_string[32]; - u8 firmware_version_string[32]; - u8 bios_version_string[32]; - u8 redboot_version_string[32]; - u8 driver_version_string[32]; - u8 fw_on_flash_version_string[32]; + u8 controller_serial_number[BEISCSI_VER_STRLEN]; + u8 ip_version_string[BEISCSI_VER_STRLEN]; + u8 firmware_version_string[BEISCSI_VER_STRLEN]; + u8 bios_version_string[BEISCSI_VER_STRLEN]; + u8 redboot_version_string[BEISCSI_VER_STRLEN]; + u8 driver_version_string[BEISCSI_VER_STRLEN]; + u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN]; u32 functionalities_supported; u16 max_cdblength; u8 asic_revision; @@ -190,7 +190,8 @@ struct mgmt_hba_attributes { u32 firmware_post_status; u32 hba_mtu[8]; u8 iscsi_features; - u8 future_u8[3]; + u8 asic_generation; + u8 future_u8[2]; u32 future_u32[3]; } __packed; @@ -207,7 +208,7 @@ struct mgmt_controller_attributes { u64 unique_identifier; u8 netfilters; u8 rsvd0[3]; - u8 future_u32[4]; + u32 future_u32[4]; } __packed; struct be_mgmt_controller_attributes { @@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag); ssize_t beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, char *buf); +ssize_t beiscsi_fw_ver_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_active_cid_disp(struct device *dev, + struct device_attribute *attr, char *buf); + ssize_t beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 11596b2c4702..08b22a901c25 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -2,7 +2,7 @@ #define _BNX2FC_H_ /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -64,10 +64,12 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.13" +#define BNX2FC_VERSION "1.0.14" #define PFX "bnx2fc: " +#define BCM_CHIP_LEN 16 + #define BNX2X_DOORBELL_PCI_BAR 2 #define BNX2FC_MAX_BD_LEN 0xffff @@ -241,6 +243,8 @@ struct bnx2fc_hba { int wait_for_link_down; int num_ofld_sess; struct list_head vports; + + char chip_num[BCM_CHIP_LEN]; }; struct bnx2fc_interface { diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index bdbbb13b8534..b1c9a4f8caee 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -3,7 +3,7 @@ * This file contains helper routines that handle ELS requests * and responses. 
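Both mgmt structures above are __packed because they mirror a firmware wire format; the u8 future_u32[4] to u32 future_u32[4] correction in mgmt_controller_attributes grows the structure by 12 bytes so it matches what the adapter actually returns. A layout change like this is cheap to pin down with a compile-time check (a sketch; EXPECTED_WIRE_SIZE is a hypothetical placeholder, not a value from the firmware spec):

/* guard the wire format against accidental layout changes */
BUILD_BUG_ON(sizeof(struct mgmt_controller_attributes) !=
             EXPECTED_WIRE_SIZE);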
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 7dffec1e5715..69ac55495c1d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -3,7 +3,7 @@ * cnic modules to create FCoE instances, send/receive non-offloaded * FIP/FCoE packets, listen to link events etc. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Dec 21, 2012" +#define DRV_MODULE_RELDATE "Mar 08, 2013" static char version[] = @@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct Scsi_Host *shost = lport->host; int rc = 0; @@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) } if (!lport->vport) fc_host_max_npiv_vports(lport->host) = USHRT_MAX; - sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", - BNX2FC_NAME, BNX2FC_VERSION, + snprintf(fc_host_symbolic_name(lport->host), 256, + "%s (Broadcom %s) v%s over %s", + BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, interface->netdev->name); return 0; @@ -1656,23 +1658,60 @@ mem_err: static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) { struct cnic_dev *cnic; + struct pci_dev *pdev; if (!hba->cnic) { printk(KERN_ERR PFX "cnic is NULL\n"); return -ENODEV; } cnic = hba->cnic; - hba->pcidev = cnic->pcidev; - if (hba->pcidev) - pci_dev_get(hba->pcidev); + pdev = hba->pcidev = cnic->pcidev; + if (!hba->pcidev) + return -ENODEV; + switch (pdev->device) { + case PCI_DEVICE_ID_NX2_57710: + strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57711: + strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57712: + case PCI_DEVICE_ID_NX2_57712_MF: + case PCI_DEVICE_ID_NX2_57712_VF: + strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57800: + case PCI_DEVICE_ID_NX2_57800_MF: + case PCI_DEVICE_ID_NX2_57800_VF: + strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57810: + case PCI_DEVICE_ID_NX2_57810_MF: + case PCI_DEVICE_ID_NX2_57810_VF: + strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57840: + case PCI_DEVICE_ID_NX2_57840_MF: + case PCI_DEVICE_ID_NX2_57840_VF: + case PCI_DEVICE_ID_NX2_57840_2_20: + case PCI_DEVICE_ID_NX2_57840_4_10: + strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN); + break; + default: + pr_err(PFX "Unknown device id 0x%x\n", pdev->device); + break; + } + pci_dev_get(hba->pcidev); return 0; } static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) { - if (hba->pcidev) + if (hba->pcidev) { + hba->chip_num[0] = '\0'; pci_dev_put(hba->pcidev); + } hba->pcidev = NULL; } diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 50510ffe1bf5..c0d035a8f8f9 100644 --- 
a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -2,7 +2,7 @@ * This file contains the low-level functions that interact * with 57712 FCoE firmware. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) fcoe_init3.error_bit_map_lo = 0xffffffff; fcoe_init3.error_bit_map_hi = 0xffffffff; - fcoe_init3.perf_config = 1; + /* + * enable both cached connection and cached tasks + * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both + */ + fcoe_init3.perf_config = 3; kwqe_arr[0] = (struct kwqe *) &fcoe_init1; kwqe_arr[1] = (struct kwqe *) &fcoe_init2; diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 723a9a8ba5ee..575142e92d9c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1,7 +1,7 @@ /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. * IO manager and SCSI IO processing. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_comp = 0; - if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, - &io_req->req_flags))) { + if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO completed in a different context\n"); + rc = SUCCESS; + } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { /* Let the scsi-ml try to recover this command */ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", io_req->xid); diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index c57a3bb8a9fb..4d93177dfb53 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -2,7 +2,7 @@ * Handles operations such as session offload/upload etc, and manages * session resources such as connection id and qp resources.
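bnx2fc_bind_pcidev() above maps the PCI device ID to a printable chip name with a switch over every ID variant. The same mapping can be written table-driven, which keeps one line per chip as new 578xx variants appear; a sketch using the PCI_DEVICE_ID_NX2_* IDs from the hunk (the table and helper are illustrative, not part of the driver):

struct bnx2fc_chip_name {
        unsigned short device;  /* PCI device ID */
        const char *name;
};

static const struct bnx2fc_chip_name bnx2fc_chip_names[] = {
        { PCI_DEVICE_ID_NX2_57710, "BCM57710" },
        { PCI_DEVICE_ID_NX2_57711, "BCM57711" },
        { PCI_DEVICE_ID_NX2_57712, "BCM57712" },
        /* ... one entry per supported device ID variant ... */
};

static const char *bnx2fc_chip_name(unsigned short device)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(bnx2fc_chip_names); i++)
                if (bnx2fc_chip_names[i].device == device)
                        return bnx2fc_chip_names[i].name;
        return "unknown";
}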
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h index 0f9c04175b11..372a67d122d3 100644 --- a/drivers/scsi/csiostor/csio_lnode.h +++ b/drivers/scsi/csiostor/csio_lnode.h @@ -114,7 +114,7 @@ struct csio_lnode_stats { uint32_t n_rnode_match; /* matched rnode */ uint32_t n_dev_loss_tmo; /* Device loss timeout */ uint32_t n_fdmi_err; /* fdmi err */ - uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ uint32_t n_rnode_alloc; /* rnode allocated */ uint32_t n_rnode_free; /* rnode freed */ diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h index 65940096a80d..433434221222 100644 --- a/drivers/scsi/csiostor/csio_rnode.h +++ b/drivers/scsi/csiostor/csio_rnode.h @@ -63,7 +63,7 @@ struct csio_rnode_stats { uint32_t n_err_nomem; /* error nomem */ uint32_t n_evt_unexp; /* unexpected event */ uint32_t n_evt_drop; /* unexpected event */ - uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ uint32_t n_lun_rst; /* Number of resets * of LUNs under this diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 98436c363035..b6d1f92ed33c 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -38,7 +38,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.5.0.2" +#define DRV_VERSION "1.5.0.22" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " @@ -192,6 +192,18 @@ enum fnic_state { struct mempool; +enum fnic_evt { + FNIC_EVT_START_VLAN_DISC = 1, + FNIC_EVT_START_FCF_DISC = 2, + FNIC_EVT_MAX, +}; + +struct fnic_event { + struct list_head list; + struct fnic *fnic; + enum fnic_evt event; +}; + /* Per-instance private data structure */ struct fnic { struct fc_lport *lport; @@ -254,6 +266,18 @@ struct fnic { struct sk_buff_head frame_queue; struct sk_buff_head tx_queue; + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); + struct work_struct fip_frame_work; + struct sk_buff_head fip_frame_queue; + struct timer_list fip_timer; + struct list_head vlans; + spinlock_t vlans_lock; + + struct work_struct event_work; + struct list_head evlist; + /*** FIP related data members -- end ***/ + /* copy work queue cache line section */ ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; /* completion queue cache line section */ @@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) } extern struct workqueue_struct *fnic_event_queue; +extern struct workqueue_struct *fnic_fip_queue; extern struct device_attribute *fnic_attrs[]; void fnic_clear_intr_mode(struct fnic *fnic); @@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *); void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); void fnic_handle_frame(struct work_struct *work); void fnic_handle_link(struct work_struct *work); +void fnic_handle_event(struct work_struct *work); int fnic_rq_cmpl_handler(struct fnic *fnic, int); int fnic_alloc_rq_frame(struct vnic_rq *rq); void fnic_free_rq_buf(struct
vnic_rq *rq, struct vnic_rq_buf *buf); @@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic); int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); +void fnic_handle_fip_frame(struct work_struct *work); +void fnic_handle_fip_event(struct fnic *fnic); +void fnic_fcoe_reset_vlans(struct fnic *fnic); +void fnic_fcoe_evlist_free(struct fnic *fnic); +extern void fnic_handle_fip_timer(struct fnic *fnic); + static inline int fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) { diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 483eb9dbe663..006fa92a02df 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -31,12 +31,20 @@ #include <scsi/libfc.h> #include "fnic_io.h" #include "fnic.h" +#include "fnic_fip.h" #include "cq_enet_desc.h" #include "cq_exch_desc.h" +static u8 fcoe_all_fcfs[ETH_ALEN]; +struct workqueue_struct *fnic_fip_queue; struct workqueue_struct *fnic_event_queue; static void fnic_set_eth_mode(struct fnic *); +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); void fnic_handle_link(struct work_struct *work) { @@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work) FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); fcoe_ctlr_link_down(&fnic->ctlr); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + return; + } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); fcoe_ctlr_link_up(&fnic->ctlr); @@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work) } else if (fnic->link_status) { /* DOWN -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + return; + } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); fcoe_ctlr_link_up(&fnic->ctlr); } else { @@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work) } } +void fnic_fcoe_evlist_free(struct fnic *fnic) +{ + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + list_del(&fevt->list); + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +void fnic_handle_event(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, event_work); + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + if (fnic->stop_rx_link_events) { + list_del(&fevt->list); + kfree(fevt); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. 
+ */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_del(&fevt->list); + switch (fevt->event) { + case FNIC_EVT_START_VLAN_DISC: + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fcoe_send_vlan_req(fnic); + spin_lock_irqsave(&fnic->fnic_lock, flags); + break; + case FNIC_EVT_START_FCF_DISC: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start FCF Discovery\n"); + fnic_fcoe_start_fcf_disc(fnic); + break; + default: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Unknown event 0x%x\n", fevt->event); + break; + } + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +/** + * Check if the received FIP FLOGI frame is rejected + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is a reject with the unsupported-command + * or insufficient-resources ELS explanation. + */ +static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, + struct sk_buff *skb) +{ + struct fc_lport *lport = fip->lp; + struct fip_header *fiph; + struct fc_frame_header *fh = NULL; + struct fip_desc *desc; + struct fip_encaps *els; + enum fip_desc_type els_dtype = 0; + u16 op; + u8 els_op; + u8 sub; + + size_t els_len = 0; + size_t rlen; + size_t dlen = 0; + + if (skb_linearize(skb)) + return 0; + + if (skb->len < sizeof(*fiph)) + return 0; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (op != FIP_OP_LS) + return 0; + + if (sub != FIP_SC_REP) + return 0; + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return 0; + + desc = (struct fip_desc *)(fiph + 1); + dlen = desc->fip_dlen * FIP_BPW; + + if (desc->fip_dtype == FIP_DT_FLOGI) { + + shost_printk(KERN_DEBUG, lport->host, + " FIP TYPE FLOGI: fab name:%llx " + "vfid:%d map:%x\n", + fip->sel_fcf->fabric_name, fip->sel_fcf->vfid, + fip->sel_fcf->fc_map); + if (dlen < sizeof(*els) + sizeof(*fh) + 1) + return 0; + + els_len = dlen - sizeof(*els); + els = (struct fip_encaps *)desc; + fh = (struct fc_frame_header *)(els + 1); + els_dtype = desc->fip_dtype; + + if (!fh) + return 0; + + /* + * ELS command code, reason and explanation should be = Reject, + * unsupported command and insufficient resource + */ + els_op = *(u8 *)(fh + 1); + if (els_op == ELS_LS_RJT) { + shost_printk(KERN_INFO, lport->host, + "Flogi Request Rejected by Switch\n"); + return 1; + } + shost_printk(KERN_INFO, lport->host, + "Flogi Request Accepted by Switch\n"); + } + return 0; +} + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct sk_buff *skb; + char *eth_fr; + int fr_len; + struct fip_vlan *vlan; + u64 vlan_tov; + + fnic_fcoe_reset_vlans(fnic); + fnic->set_vlan(fnic, 0); + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Sending VLAN request...\n"); + skb = dev_alloc_skb(sizeof(struct fip_vlan)); + if (!skb) + return; + + fr_len = sizeof(*vlan); + eth_fr = (char *)skb->data; + vlan = (struct fip_vlan *)eth_fr; + + memset(vlan, 0, sizeof(*vlan)); + memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN); + vlan->eth.h_proto = htons(ETH_P_FIP); + + vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + vlan->fip.fip_op = htons(FIP_OP_VLAN); + vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); + + vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; 
vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; + memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); + + skb_put(skb, sizeof(*vlan)); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + fip->send(fip, skb); + + /* set a timer so that we can retry if there is no response */ + vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); + mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); +} + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct fip_header *fiph; + struct fip_desc *desc; + u16 vid; + size_t rlen; + size_t dlen; + struct fcoe_vlan *vlan; + u64 sol_time; + unsigned long flags; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response...\n"); + + fiph = (struct fip_header *) skb->data; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response... OP 0x%x SUB_OP 0x%x\n", + ntohs(fiph->fip_op), fiph->fip_subcode); + + rlen = ntohs(fiph->fip_dl_len) * 4; + fnic_fcoe_reset_vlans(fnic); + spin_lock_irqsave(&fnic->vlans_lock, flags); + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_VLAN: + vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); + shost_printk(KERN_INFO, fnic->lport->host, + "process_vlan_resp: FIP VLAN %d\n", vid); + vlan = kmalloc(sizeof(*vlan), + GFP_ATOMIC); + if (!vlan) { + /* retry from timer */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + goto out; + } + memset(vlan, 0, sizeof(struct fcoe_vlan)); + vlan->vid = vid & 0x0fff; + vlan->state = FIP_VLAN_AVAIL; + list_add_tail(&vlan->list, &fnic->vlans); + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + /* any VLAN descriptors present ? 
*/ + if (list_empty(&fnic->vlans)) { + /* retry from timer */ + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "No VLAN descriptors in FIP VLAN response\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + goto out; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count++; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(fip); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +out: + return; +} + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + u64 sol_time; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count = 1; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(&fnic->ctlr); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +} + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) +{ + unsigned long flags; + struct fcoe_vlan *fvlan; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; + } + + fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + if (fvlan->state == FIP_VLAN_USED) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + + if (fvlan->state == FIP_VLAN_SENT) { + fvlan->state = FIP_VLAN_USED; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; +} + +static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) +{ + struct fnic_event *fevt; + unsigned long flags; + + fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC); + if (!fevt) + return; + + fevt->fnic = fnic; + fevt->event = ev; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&fevt->list, &fnic->evlist); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + schedule_work(&fnic->event_work); +} + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) +{ + struct fip_header *fiph; + int ret = 1; + u16 op; + u8 sub; + + if (!skb || !(skb->data)) + return -1; + + if (skb_linearize(skb)) + goto drop; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) + goto drop; + + if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) + goto drop; + + if (op == FIP_OP_DISC && sub == FIP_SC_ADV) { + if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) + goto drop; + /* pass it on to fcoe */ + ret = 1; + } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) { + /* set the vlan as used */ + fnic_fcoe_process_vlan_resp(fnic, skb); + ret = 0; + } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { + /* received CVL request, restart vlan disc */ + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + /* pass it on to fcoe */ + ret = 1; + } +drop: + return ret; +} + +void fnic_handle_fip_frame(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); + unsigned long flags; + struct sk_buff *skb; + struct ethhdr *eh; + + while ((skb = 
skb_dequeue(&fnic->fip_frame_queue))) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb(skb); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + skb_queue_head(&fnic->fip_frame_queue, skb); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + eh = (struct ethhdr *)skb->data; + if (eh->h_proto == htons(ETH_P_FIP)) { + skb_pull(skb, sizeof(*eh)); + if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { + dev_kfree_skb(skb); + continue; + } + /* + * If there are FLOGI rejects, clear all + * FCFs and restart from scratch + */ + if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { + shost_printk(KERN_INFO, fnic->lport->host, + "Trigger a Link down - VLAN Disc\n"); + fcoe_ctlr_link_down(&fnic->ctlr); + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + dev_kfree_skb(skb); + continue; + } + fcoe_ctlr_recv(&fnic->ctlr, skb); + continue; + } + } +} + /** * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. * @fnic: fnic instance. @@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) skb_reset_mac_header(skb); } if (eh->h_proto == htons(ETH_P_FIP)) { - skb_pull(skb, sizeof(*eh)); - fcoe_ctlr_recv(&fnic->ctlr, skb); + if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { + printk(KERN_ERR "Dropped FIP frame, as firmware " + "uses non-FIP mode; enable FIP " + "using UCSM\n"); + goto drop; + } + skb_queue_tail(&fnic->fip_frame_queue, skb); + queue_work(fnic_fip_queue, &fnic->fip_frame_work); return 1; /* let caller know packet was used */ } if (eh->h_proto != htons(ETH_P_FCOE)) @@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) dev_kfree_skb(fp_skb(fp)); buf->os_buf = NULL; } + +void fnic_fcoe_reset_vlans(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + struct fcoe_vlan *next; + + /* + * indicate a link down to fcoe so that all FCFs are freed; + * this might not be required since we did this before sending + * the vlan discovery request + */ + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (!list_empty(&fnic->vlans)) { + list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { + list_del(&vlan->list); + kfree(vlan); + } + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); +} + +void fnic_handle_fip_timer(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + u64 sol_time; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->ctlr.mode == FIP_ST_NON_FIP) + return; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + /* no vlans available, try again */ + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + shost_printk(KERN_DEBUG, fnic->lport->host, + "fip_timer: vlan %d state %d sol_count %d\n", + vlan->vid, vlan->state, vlan->sol_count); + switch (vlan->state) { + case FIP_VLAN_USED: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "FIP VLAN 
is selected for FC transaction\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + break; + case FIP_VLAN_FAILED: + /* if all vlans are in failed state, restart vlan disc */ + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + break; + case FIP_VLAN_SENT: + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + /* + * no response on this vlan, remove from the list. + * Try the next vlan + */ + shost_printk(KERN_INFO, fnic->lport->host, + "Dequeue this VLAN ID %d from list\n", + vlan->vid); + list_del(&vlan->list); + kfree(vlan); + vlan = NULL; + if (list_empty(&fnic->vlans)) { + /* we exhausted all vlans, restart vlan disc */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + shost_printk(KERN_INFO, fnic->lport->host, + "fip_timer: vlan list empty, " + "trigger vlan disc\n"); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + /* check the next vlan */ + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, + list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + vlan->sol_count++; + sol_time = jiffies + msecs_to_jiffies + (FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + break; + } +} diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h new file mode 100644 index 000000000000..87e74c2ab971 --- /dev/null +++ b/drivers/scsi/fnic/fnic_fip.h @@ -0,0 +1,68 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _FNIC_FIP_H_ +#define _FNIC_FIP_H_ + + +#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */ +#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */ +#define FCOE_CTLR_MAX_SOL 8 + +#define FINC_MAX_FLOGI_REJECTS 8 + +/* + * FIP_DT_VLAN descriptor. + */ +struct fip_vlan_desc { + struct fip_desc fd_desc; + __be16 fd_vlan; +} __attribute__((packed)); + +struct vlan { + __be16 vid; + __be16 type; +}; + +/* + * VLAN entry. + */ +struct fcoe_vlan { + struct list_head list; + u16 vid; /* vlan ID */ + u16 sol_count; /* no. 
of sols sent */ + u16 state; /* state */ +}; + +enum fip_vlan_state { + FIP_VLAN_AVAIL = 0, /* don't do anything */ + FIP_VLAN_SENT = 1, /* sent */ + FIP_VLAN_USED = 2, /* in use */ + FIP_VLAN_FAILED = 3, /* failed to respond */ +}; + +struct fip_vlan { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + } desc; +}; + +#endif /* _FNIC_FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index d601ac543c52..5f09d1814d26 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -39,6 +39,7 @@ #include "vnic_intr.h" #include "vnic_stats.h" #include "fnic_io.h" +#include "fnic_fip.h" #include "fnic.h" #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 @@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data) round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); } +static void fnic_fip_notify_timer(unsigned long data) +{ + struct fnic *fnic = (struct fnic *)data; + + fnic_handle_fip_timer(fnic); +} + static void fnic_notify_timer_start(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { @@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport) return fnic->data_src_addr; } +static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) +{ + u16 old_vlan; + old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id); +} + static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct Scsi_Host *host; @@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + fnic->set_vlan = fnic_set_vlan; fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); + setup_timer(&fnic->fip_timer, fnic_fip_notify_timer, + (unsigned long)fnic); + spin_lock_init(&fnic->vlans_lock); + INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); + INIT_WORK(&fnic->event_work, fnic_handle_event); + skb_queue_head_init(&fnic->fip_frame_queue); + spin_lock_irqsave(&fnic_list_lock, flags); + if (!fnic_fip_queue) { + fnic_fip_queue = + create_singlethread_workqueue("fnic_fip_q"); + if (!fnic_fip_queue) { + spin_unlock_irqrestore(&fnic_list_lock, flags); + printk(KERN_ERR PFX "fnic FIP work queue " + "create failed\n"); + err = -ENOMEM; + goto err_out_free_max_pool; + } + } + spin_unlock_irqrestore(&fnic_list_lock, flags); + INIT_LIST_HEAD(&fnic->evlist); + INIT_LIST_HEAD(&fnic->vlans); } else { shost_printk(KERN_INFO, fnic->lport->host, "firmware uses non-FIP mode\n"); @@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev) skb_queue_purge(&fnic->frame_queue); skb_queue_purge(&fnic->tx_queue); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + del_timer_sync(&fnic->fip_timer); + skb_queue_purge(&fnic->fip_frame_queue); + fnic_fcoe_reset_vlans(fnic); + fnic_fcoe_evlist_free(fnic); + } + /* * Log off the fabric. This stops all remote ports, dns port, * logs off the fabric. 
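There is a deliberate indirection in the FIP plumbing set up above: fnic_fip_notify_timer() runs in timer (softirq) context, so when discovery has to restart, fnic_handle_fip_timer() does not rebuild the VLAN list and send a new request from the callback; it enqueues FNIC_EVT_START_VLAN_DISC via fnic_event_enq(), and schedule_work() hands the actual fnic_fcoe_send_vlan_req() call to fnic_handle_event() in process context. Reduced to its generic shape (a sketch, not driver code):

/* timer callback (softirq context): only record that work is needed;
 * fnic_event_enq() queues the event and schedules event_work, and the
 * process-context worker does the allocation-heavy retry */
static void example_fip_timer(unsigned long data)
{
        struct fnic *fnic = (struct fnic *)data;

        fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
}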
This flushes all rport, disc, lport work @@ -889,8 +932,8 @@ static int __init fnic_init_module(void) len = sizeof(struct fnic_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, - SLAB_HWCACHE_ALIGN, - NULL); + SLAB_HWCACHE_ALIGN, + NULL); if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); err = -ENOMEM; @@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void) { pci_unregister_driver(&fnic_driver); destroy_workqueue(fnic_event_queue); + if (fnic_fip_queue) { + flush_workqueue(fnic_fip_queue); + destroy_workqueue(fnic_fip_queue); + } kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); kmem_cache_destroy(fnic_io_req_cache); diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index b576be734e2e..9795d6f3e197 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg) return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); } +u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan) +{ + u64 a0 = new_default_vlan, a1 = 0; + int wait = 1000; + int old_vlan = 0; + + old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait); + return (u16)old_vlan; +} + int vnic_dev_link_status(struct vnic_dev *vdev) { if (vdev->linkstatus) diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h index f9935a8a5a09..40d4195f562b 100644 --- a/drivers/scsi/fnic/vnic_dev.h +++ b/drivers/scsi/fnic/vnic_dev.h @@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev); int vnic_dev_open(struct vnic_dev *vdev, int arg); int vnic_dev_open_done(struct vnic_dev *vdev, int *done); int vnic_dev_init(struct vnic_dev *vdev, int arg); +u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, + u16 new_default_vlan); int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); void vnic_dev_set_intr_mode(struct vnic_dev *vdev, diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h index 7c9ccbd4134b..3e2fcbda6aed 100644 --- a/drivers/scsi/fnic/vnic_devcmd.h +++ b/drivers/scsi/fnic/vnic_devcmd.h @@ -196,6 +196,73 @@ enum vnic_devcmd_cmd { /* undo initialize of virtual link */ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), + + /* check fw capability of a cmd: + * in: (u32)a0=cmd + * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */ + CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), + + /* persistent binding info + * in: (u64)a0=paddr of arg + * (u32)a1=CMD_PERBI_XXX */ + CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37), + + /* Interrupt Assert Register functionality + * in: (u16)a0=interrupt number to assert + */ + CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38), + + /* initiate hangreset, like softreset after hang detected */ + CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39), + + /* hangreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40), + + /* + * Set hw ingress packet vlan rewrite mode: + * in: (u32)a0=new vlan rewrite mode + * out: (u32)a0=old vlan rewrite mode */ + CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41), + + /* + * in: (u16)a0=bdf of target vnic + * (u32)a1=cmd to proxy + * a2-a15=args to cmd in a1 + * out: (u32)a0=status of proxied cmd + 
* a1-a15=out args of proxied cmd */ + CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42), + + /* + * As for BY_BDF except a0 is index of hvnlink subordinate vnic + * or SR-IOV virtual vnic + */ + CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43), + + /* + * For HPP toggle: + * adapter-info-get + * in: (u64)a0=physical address of buffer passed in from caller. + * (u16)a1=size of buffer specified in a0. + * out: (u64)a0=physical address of buffer passed in from caller. + * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or + * 0 if no VIF-CONFIG-INFO TLV was ever received. */ + CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), + + /* + * INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u32)a1=INT13_CMD_xxx + */ + CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45), + + /* + * Set default vlan: + * in: (u16)a0=new default vlan + * (u16)a1=zero for overriding vlan with param a0, + * non-zero for resetting vlan to the default + * out: (u16)a0=old default vlan + */ + CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46) }; /* flags for CMD_OPEN */ diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index cc82d0f322b6..4e31caa21ddf 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) return 0; } - if (vhost->state == IBMVFC_ACTIVE) { + if (vhost->logged_in) { evt = ibmvfc_get_event(vhost); ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); @@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) tmf->common.length = sizeof(*tmf); tmf->scsi_id = rport->port_id; int_to_scsilun(sdev->lun, &tmf->lun); - tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS)) + type &= ~IBMVFC_TMF_SUPPRESS_ABTS; + if (vhost->state == IBMVFC_ACTIVE) + tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + else + tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID); tmf->cancel_key = (unsigned long)sdev->hostdata; tmf->my_cancel_key = (unsigned long)starget->hostdata; @@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) timeout = wait_for_completion_timeout(&evt->comp, timeout); if (!timeout) { - rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + rc = ibmvfc_cancel_all(sdev, 0); if (!rc) { rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); if (rc == SUCCESS) @@ -2383,24 +2388,30 @@ out: * @cmd: scsi command to abort * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); - int cancel_rc, abort_rc; + int cancel_rc, block_rc; int rc = FAILED; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); - abort_rc = ibmvfc_abort_task_set(sdev); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + ibmvfc_abort_task_set(sdev); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); - if (!cancel_rc && !abort_rc) + if (!cancel_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } @@ -2410,29 +2421,47 @@ static int 
ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) * @cmd: scsi command struct * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); - int cancel_rc, reset_rc; + int cancel_rc, block_rc, reset_rc = 0; int rc = FAILED; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } /** + * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function + * @sdev: scsi device struct + * @data: return code + * + **/ +static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data) +{ + unsigned long *rc = data; + *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); +} + +/** * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function * @sdev: scsi device struct * @data: return code @@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) * @cmd: scsi command struct * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); struct scsi_target *starget = scsi_target(sdev); - int reset_rc; + int block_rc; + int reset_rc = 0; int rc = FAILED; unsigned long cancel_rc = 0; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + if (block_rc != FAST_IO_FAIL) { + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + } else + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } @@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) **/ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) { - int rc; + int rc, block_rc; struct ibmvfc_host *vhost = shost_priv(cmd->device->host); - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); dev_err(vhost->dev, "Resetting connection due to error recovery\n"); rc = ibmvfc_issue_fc_host_lip(vhost->host); + + if (block_rc == FAST_IO_FAIL) + return FAST_IO_FAIL; + return rc ? 
FAILED : SUCCESS; } @@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) dev_rport = starget_to_rport(scsi_target(sdev)); if (dev_rport != rport) continue; - ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); - ibmvfc_abort_task_set(sdev); + ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); } rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 3be8af624e6f..017a5290e8c1 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.10" -#define IBMVFC_DRIVER_DATE "(August 24, 2012)" +#define IBMVFC_DRIVER_VERSION "1.0.11" +#define IBMVFC_DRIVER_DATE "(April 12, 2013)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 @@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp { u16 error; u32 flags; #define IBMVFC_NATIVE_FC 0x01 -#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 reserved; u64 capabilities; #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 +#define IBMVFC_CAN_SUPPRESS_ABTS 0x10 u32 max_cmds; u32 scsi_id_sz; u64 max_dma_len; @@ -351,6 +351,7 @@ struct ibmvfc_tmf { #define IBMVFC_TMF_LUN_RESET 0x10 #define IBMVFC_TMF_TGT_RESET 0x20 #define IBMVFC_TMF_LUA_VALID 0x40 +#define IBMVFC_TMF_SUPPRESS_ABTS 0x80 u32 cancel_key; u32 my_cancel_key; u32 pad; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 2197b57fb225..82a3c1ec8706 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd) ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (!ioa_cfg->in_reset_reload) { + if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); dev_err(&ioa_cfg->pdev->dev, "Adapter being reset as a result of error recovery.\n"); @@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, { u32 ioadl_flags = 0; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; struct ipr_ioadl64_desc *last_ioadl64 = NULL; int len = qc->nbytes; struct scatterlist *sg; @@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); ioarcb->u.sis64_addr_data.data_ioadl_addr = - cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl)); + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64)); for_each_sg(qc->sg, sg, qc->n_elem, si) { ioadl64->flags = cpu_to_be32(ioadl_flags); @@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int i; ENTER; if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { @@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) ioa_cfg->in_reset_reload = 0; ioa_cfg->reset_retries = 0; + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].ioa_is_dead = 1; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); wake_up_all(&ioa_cfg->reset_wait_q); LEAVE; @@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct 
pci_dev *pdev) spin_lock_irqsave(ioa_cfg->host->host_lock, flags); if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) ioa_cfg->sdt_state = ABORT_DUMP; - ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; + ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; ioa_cfg->in_ioa_bringdown = 1; for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 21a6ff1ed5c6..a1fb840596ef 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs { /* 22 bytes */ u8 hob_lbam; u8 hob_lbah; u8 ctl; -}__attribute__ ((packed, aligned(4))); +}__attribute__ ((packed, aligned(2))); struct ipr_ioadl_desc { __be32 flags_and_data_len; diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index c3aa6c5457b9..96a26f454673 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s struct isci_host *ihost = idev->owning_port->owning_controller; struct domain_device *dev = idev->domain_dev; - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); } else if (dev_is_expander(dev)) { sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); @@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct domain_device *dev = idev->domain_dev; - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { struct isci_host *ihost = idev->owning_port->owning_controller; isci_remote_device_not_ready(ihost, idev, diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 7674caae1d88..47a013fffae7 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte static inline bool dev_is_expander(struct domain_device *dev) { - return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; + return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE; } static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 9594ab62702b..e3e3bcbd5a9f 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm) /* all unaccelerated request types (non ssp or ncq) handled with * substates */ - if (!task && dev->dev_type == SAS_END_DEV) { + if (!task && dev->dev_type == SAS_END_DEVICE) { state = SCI_REQ_TASK_WAIT_TC_COMP; } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { state = SCI_REQ_SMP_WAIT_RESP; @@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost, if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; - if (dev->dev_type == SAS_END_DEV) + if (dev->dev_type == SAS_END_DEVICE) /* pass */; else if (dev_is_sata(dev)) memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); @@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost, /* Build the common part of the request */ 
sci_general_request_construct(ihost, idev, ireq); - if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { + if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) { set_bit(IREQ_TMF, &ireq->flags); memset(ireq->tc, 0, sizeof(struct scu_task_context)); diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index b6f19a1db780..9bb020ac089c 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, } /* XXX convert to get this from task->tproto like other drivers */ - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { isci_tmf->proto = SAS_PROTOCOL_SSP; status = sci_task_request_construct_ssp(ireq); if (status != SCI_SUCCESS) diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index bdb81cda8401..161c98efade9 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) if (phy->attached_tproto & SAS_PROTOCOL_STP) dev->tproto = phy->attached_tproto; if (phy->attached_sata_dev) - dev->tproto |= SATA_DEV; + dev->tproto |= SAS_SATA_DEV; - if (phy->attached_dev_type == SATA_PENDING) - dev->dev_type = SATA_PENDING; + if (phy->attached_dev_type == SAS_SATA_PENDING) + dev->dev_type = SAS_SATA_PENDING; else { int res; - dev->dev_type = SATA_DEV; + dev->dev_type = SAS_SATA_DEV; res = sas_get_report_phy_sata(dev->parent, phy->phy_id, &dev->sata_dev.rps_resp); if (res) { @@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy) int res; /* we weren't pending, so successfully end the reset sequence now */ - if (dev->dev_type != SATA_PENDING) + if (dev->dev_type != SAS_SATA_PENDING) return 1; /* hmmm, if this succeeds do we need to repost the domain_device to the @@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link) return 0; switch (ex_phy->attached_dev_type) { - case SATA_PENDING: + case SAS_SATA_PENDING: return 0; - case SAS_END_DEV: + case SAS_END_DEVICE: if (ex_phy->attached_sata_dev) return sas_ata_clear_pending(dev, ex_phy); default: @@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev) struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; - if (dev->dev_type == SATA_PENDING) + if (dev->dev_type == SAS_SATA_PENDING) return; if ((fis->sector_count == 1 && /* ATA */ @@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev) { int res; - if (dev->dev_type == SATA_PM) + if (dev->dev_type == SAS_SATA_PM) return -ENODEV; sas_get_ata_command_set(dev); diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index a0c3003e0c7d..62b58d38ce2e 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -39,11 +39,11 @@ void sas_init_dev(struct domain_device *dev) { switch (dev->dev_type) { - case SAS_END_DEV: + case SAS_END_DEVICE: INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node); break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: INIT_LIST_HEAD(&dev->ex_dev.children); mutex_init(&dev->ex_dev.cmd_mutex); break; @@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port) if (fis->interrupt_reason == 1 && fis->lbal == 1 && fis->byte_count_low==0x69 && fis->byte_count_high == 0x96 && (fis->device & ~0x10) == 0) - dev->dev_type = SATA_PM; + dev->dev_type = SAS_SATA_PM; else - dev->dev_type = 
SATA_DEV; + dev->dev_type = SAS_SATA_DEV; dev->tproto = SAS_PROTOCOL_SATA; } else { struct sas_identify_frame *id = @@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port) dev->port = port; switch (dev->dev_type) { - case SATA_DEV: + case SAS_SATA_DEV: rc = sas_ata_init(dev); if (rc) { rphy = NULL; break; } /* fall through */ - case SAS_END_DEV: + case SAS_END_DEVICE: rphy = sas_end_device_alloc(port->port); break; - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(port->port, SAS_EDGE_EXPANDER_DEVICE); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(port->port, SAS_FANOUT_EXPANDER_DEVICE); break; @@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port) dev->rphy = rphy; get_device(&dev->rphy->dev); - if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) + if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE) list_add_tail(&dev->disco_list_node, &port->disco_list); else { spin_lock_irq(&port->dev_list_lock); @@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref) dev->phy = NULL; /* remove the phys and ports, everything else should be gone */ - if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) kfree(dev->ex_dev.ex_phy); if (dev_is_sata(dev) && dev->sata_dev.ap) { @@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d spin_unlock_irq(&port->dev_list_lock); spin_lock_irq(&ha->lock); - if (dev->dev_type == SAS_END_DEV && + if (dev->dev_type == SAS_END_DEVICE && !list_empty(&dev->ssp_dev.eh_list_node)) { list_del_init(&dev->ssp_dev.eh_list_node); ha->eh_active--; @@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work) task_pid_nr(current)); switch (dev->dev_type) { - case SAS_END_DEV: + case SAS_END_DEVICE: error = sas_discover_end_dev(dev); break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: error = sas_discover_root_expander(dev); break; - case SATA_DEV: - case SATA_PM: + case SAS_SATA_DEV: + case SAS_SATA_PM: #ifdef CONFIG_SCSI_SAS_ATA error = sas_discover_sata(dev); break; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f42b0e15410f..446b85110a1f 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) } } -static enum sas_dev_type to_dev_type(struct discover_resp *dr) +static enum sas_device_type to_dev_type(struct discover_resp *dr) { /* This is detecting a failure to transmit initial dev to host * FIS as described in section J.5 of sas-2 r16 */ - if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev && + if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev && dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) - return SATA_PENDING; + return SAS_SATA_PENDING; else return dr->attached_dev_type; } static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) { - enum sas_dev_type dev_type; + enum sas_device_type dev_type; enum sas_linkrate linkrate; u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; @@ -238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) /* Handle vacant phy - rest of dr data is not valid so skip it */ if (phy->phy_state == PHY_VACANT) { memset(phy->attached_sas_addr, 0, 
SAS_ADDR_SIZE); - phy->attached_dev_type = NO_DEVICE; + phy->attached_dev_type = SAS_PHY_UNUSED; if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { phy->phy_id = phy_id; goto skip; @@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) /* help some expanders that fail to zero sas_address in the 'no * device' case */ - if (phy->attached_dev_type == NO_DEVICE || + if (phy->attached_dev_type == SAS_PHY_UNUSED || phy->linkrate < SAS_LINK_RATE_1_5_GBPS) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else @@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) out: switch (phy->attached_dev_type) { - case SATA_PENDING: + case SAS_SATA_PENDING: type = "stp pending"; break; - case NO_DEVICE: + case SAS_PHY_UNUSED: type = "no device"; break; - case SAS_END_DEV: + case SAS_END_DEVICE: if (phy->attached_iproto) { if (phy->attached_tproto) type = "host+target"; @@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) type = "ssp"; } break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: type = "smp"; break; default: @@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev( } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { - child->dev_type = SAS_END_DEV; + child->dev_type = SAS_END_DEVICE; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) @@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander( switch (phy->attached_dev_type) { - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; @@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); - if (ex_phy->attached_dev_type == NO_DEVICE) { + if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); @@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; - if (ex_phy->attached_dev_type != SAS_END_DEV && - ex_phy->attached_dev_type != FANOUT_DEV && - ex_phy->attached_dev_type != EDGE_DEV && - ex_phy->attached_dev_type != SATA_PENDING) { + if (ex_phy->attached_dev_type != SAS_END_DEVICE && + ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_SATA_PENDING) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), @@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } switch (ex_phy->attached_dev_type) { - case SAS_END_DEV: - case SATA_PENDING: + case SAS_END_DEVICE: + case SAS_SATA_PENDING: child = sas_ex_discover_end_dev(dev, phy_id); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", @@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct 
domain_device *dev, int phy_id) memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; default: @@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) phy->phy_state == PHY_NOT_PRESENT) continue; - if ((phy->attached_dev_type == EDGE_DEV || - phy->attached_dev_type == FANOUT_DEV) && + if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE || + phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); @@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev) u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { - if (child->dev_type != EDGE_DEV && - child->dev_type != FANOUT_DEV) + if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE && + child->dev_type != SAS_FANOUT_EXPANDER_DEVICE) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); @@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev) int i; u8 *sub_sas_addr = NULL; - if (dev->dev_type != EDGE_DEV) + if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE) return 0; for (i = 0; i < ex->num_phys; i++) { @@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev) phy->phy_state == PHY_NOT_PRESENT) continue; - if ((phy->attached_dev_type == FANOUT_DEV || - phy->attached_dev_type == EDGE_DEV) && + if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE || + phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) @@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *child_phy) { static const char *ex_type[] = { - [EDGE_DEV] = "edge", - [FANOUT_DEV] = "fanout", + [SAS_EDGE_EXPANDER_DEVICE] = "edge", + [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", }; struct domain_device *parent = child->parent; @@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child) if (!child->parent) return 0; - if (child->parent->dev_type != EDGE_DEV && - child->parent->dev_type != FANOUT_DEV) + if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE && + child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE) return 0; parent_ex = &child->parent->ex_dev; @@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child) child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { - case EDGE_DEV: - if (child->dev_type == FANOUT_DEV) { + case SAS_EDGE_EXPANDER_DEVICE: + if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); @@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child) } } break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); @@ -1619,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { - if (dev->dev_type == EDGE_DEV || - dev->dev_type == FANOUT_DEV) { + if (dev->dev_type == 
SAS_EDGE_EXPANDER_DEVICE || + dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); @@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev, } static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, - u8 *sas_addr, enum sas_dev_type *type) + u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_resp *disc_resp; @@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev, SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { - if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { + if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { res = sas_find_bcast_dev(ch, src_dev); if (*src_dev) return res; @@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi list_for_each_entry_safe(child, n, &ex->children, siblings) { set_bit(SAS_DEV_GONE, &child->state); - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(port, child); else sas_unregister_dev(port, child); @@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { set_bit(SAS_DEV_GONE, &child->state); - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(parent->port, child); else sas_unregister_dev(parent->port, child); @@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root, int res = 0; list_for_each_entry(child, &ex_root->children, siblings) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) { + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); @@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) res = sas_discover_bfs_by_root(child); break; } @@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) return res; } -static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old) +static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old) { if (old == new) return true; /* treat device directed resets as flutter, if we went - * SAS_END_DEV to SATA_PENDING the link needs recovery + * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery */ - if ((old == SATA_PENDING && new == SAS_END_DEV) || - (old == SAS_END_DEV && new == SATA_PENDING)) + if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) || + (old == SAS_END_DEVICE && new == SAS_SATA_PENDING)) return true; return false; @@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; - enum sas_dev_type type = NO_DEVICE; + enum 
sas_device_type type = SAS_PHY_UNUSED; u8 sas_addr[8]; int res; @@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) sas_ex_phy_discover(dev, phy_id); - if (ata_dev && phy->attached_dev_type == SATA_PENDING) + if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 1de67964e5a1..7e7ba83f0a21 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev, rphy->identify.initiator_port_protocols = dev->iproto; rphy->identify.target_port_protocols = dev->tproto; switch (dev->dev_type) { - case SATA_DEV: + case SAS_SATA_DEV: /* FIXME: need sata device type */ - case SAS_END_DEV: - case SATA_PENDING: + case SAS_END_DEVICE: + case SAS_SATA_PENDING: rphy->identify.device_type = SAS_END_DEVICE; break; - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE; break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE; break; default: diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 1398b714c018..d3c5297c6c89 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy) continue; } - if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { dev->ex_dev.ex_change_count = -1; for (i = 0; i < dev->ex_dev.num_phys; i++) { struct ex_phy *phy = &dev->ex_dev.ex_phy[i]; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 7706c99ec8bb..bcc56cac4fd8 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -46,10 +46,15 @@ struct lpfc_sli2_slim; #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi cmnd for menlo needs nearly twice as for firmware downloads using bsg */ -#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ + +#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */ +#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ +#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ + #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ -#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ #define LPFC_VNAME_LEN 100 /* vport symbolic name length */ @@ -66,8 +71,10 @@ struct lpfc_sli2_slim; * queue depths when there are driver resource error or Firmware * resource error. 
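The libsas and isci hunks above are one mechanical rename: the driver-private enum sas_dev_type spellings (NO_DEVICE, SAS_END_DEV, EDGE_DEV, FANOUT_DEV, SATA_DEV, SATA_PM, SATA_PENDING) give way to the names the SAS transport class already uses, so domain_device->dev_type and rphy->identify.device_type draw from a single namespace. A minimal sketch of the unified enum as these hunks assume it (illustrative only; the authoritative definition, including the numeric values, lives in include/scsi/scsi_transport_sas.h):

enum sas_device_type {
	SAS_PHY_UNUSED,			/* was NO_DEVICE */
	SAS_END_DEVICE,			/* was SAS_END_DEV */
	SAS_EDGE_EXPANDER_DEVICE,	/* was EDGE_DEV */
	SAS_FANOUT_EXPANDER_DEVICE,	/* was FANOUT_DEV */
	SAS_SATA_DEV,			/* was SATA_DEV */
	SAS_SATA_PM,			/* was SATA_PM */
	SAS_SATA_PENDING,		/* was SATA_PENDING */
};

With one enum, helpers such as sas_fill_in_rphy() above no longer translate between two constant sets; the expander cases simply pass the value through.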
*/ -#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */ -#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */ +/* 1 Second */ +#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1)) +/* 5 minutes */ +#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300)) /* Number of exchanges reserved for discovery to complete */ #define LPFC_DISC_IOCB_BUFF_COUNT 20 @@ -671,6 +678,7 @@ struct lpfc_hba { uint32_t lmt; uint32_t fc_topology; /* link topology, from LINK INIT */ + uint32_t fc_topology_changed; /* link topology changed since LINK INIT */ struct lpfc_stats fc_stat; @@ -701,9 +709,11 @@ struct lpfc_hba { uint32_t cfg_poll_tmo; uint32_t cfg_use_msi; uint32_t cfg_fcp_imax; + uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_wq_count; uint32_t cfg_fcp_eq_count; uint32_t cfg_fcp_io_channel; + uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -804,8 +814,10 @@ struct lpfc_hba { uint64_t bg_reftag_err_cnt; /* fastpath list. */ - spinlock_t scsi_buf_list_lock; - struct list_head lpfc_scsi_buf_list; + spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */ + spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */ + struct list_head lpfc_scsi_buf_list_get; + struct list_head lpfc_scsi_buf_list_put; uint32_t total_scsi_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 9290713af253..3c5625b8b1f4 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) int i; int rc; + if (phba->pport->fc_flag & FC_OFFLINE_MODE) + return 0; + init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_OFFLINE_PREP); @@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba) int status = 0; int rc; - if (!phba->cfg_enable_hba_reset) + if ((!phba->cfg_enable_hba_reset) || + (phba->pport->fc_flag & FC_OFFLINE_MODE)) return -EACCES; status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); @@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) pci_disable_sriov(pdev); phba->cfg_sriov_nr_virtfn = 0; } + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) @@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3054 lpfc_topology changed from %d to %d\n", prev_val, val); + if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4) + phba->fc_topology_changed = 1; err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); if (err) { phba->cfg_topology = prev_val; @@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR, lpfc_fcp_imax_show, lpfc_fcp_imax_store); +/** + * lpfc_fcp_cpu_map_show - Display current driver CPU affinity + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains text describing the CPU to IRQ vector mapping. + * + * Returns: size of formatted string.
+ **/ +static ssize_t +lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_vector_map_info *cpup; + int idx, len = 0; + + if ((phba->sli_rev != LPFC_SLI_REV4) || + (phba->intr_type != MSIX)) + return len; + + switch (phba->cfg_fcp_cpu_map) { + case 0: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: No mapping (%d)\n", + phba->cfg_fcp_cpu_map); + return len; + case 1: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: HBA centric mapping (%d): " + "%d online CPUs\n", + phba->cfg_fcp_cpu_map, + phba->sli4_hba.num_online_cpu); + break; + case 2: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: Driver centric mapping (%d): " + "%d online CPUs\n", + phba->cfg_fcp_cpu_map, + phba->sli4_hba.num_online_cpu); + break; + } + + cpup = phba->sli4_hba.cpu_map; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) + len += snprintf(buf + len, PAGE_SIZE-len, + "CPU %02d io_chan %02d " + "physid %d coreid %d\n", + idx, cpup->channel_id, cpup->phys_id, + cpup->core_id); + else + len += snprintf(buf + len, PAGE_SIZE-len, + "CPU %02d io_chan %02d " + "physid %d coreid %d IRQ %d\n", + idx, cpup->channel_id, cpup->phys_id, + cpup->core_id, cpup->irq); + + cpup++; + } + return len; +} + +/** + * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: requested CPU mapping mode, currently ignored. + * @count: not used. + * + * Returns: + * -EINVAL - Not implemented yet. + **/ +static ssize_t +lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int status = -EINVAL; + return status; +} + +/* +# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors +# for the HBA. +# +# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2). +# 0 - Do not affinitize IRQ vectors +# 1 - Affinitize HBA vectors with respect to each HBA +# (start with CPU0 for each HBA) +# 2 - Affinitize HBA vectors with respect to the entire driver +# (round robin through all CPUs across all HBAs) +*/ +static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; +module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(lpfc_fcp_cpu_map, + "Defines how to map CPUs to IRQ vectors per HBA"); + +/** + * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode + * @phba: lpfc_hba pointer. + * @val: CPU mapping mode value. + * + * Description: + * If val is in a valid range [0-2], then affinitize the adapter's + * MSIX vectors. + * + * Returns: + * zero; an out-of-range val is logged and replaced with the default. + **/ +static int +lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val) +{ + if (phba->sli_rev != LPFC_SLI_REV4) { + phba->cfg_fcp_cpu_map = 0; + return 0; + } + + if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) { + phba->cfg_fcp_cpu_map = val; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3326 fcp_cpu_map: %d out of range, using default\n", + val); + phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; + + return 0; +} + +static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR, + lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store); + /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. # Value range is [2,3]. Default value is 3.
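The three lpfc_fcp_cpu_map modes differ only in where the round robin starts: mode 1 restarts the rotation at CPU0 for every HBA, while mode 2 (the default) carries it across HBAs so vectors from several adapters interleave over the machine. A hypothetical sketch of that policy; vec_map, map_io_channels and the start cursor are simplified stand-ins for the driver's lpfc_vector_map_info handling, not its actual code:

/* Distribute nchan I/O channels over ncpu CPUs. For "driver centric"
 * mapping (mode 2) *start persists across HBAs; for "HBA centric"
 * mapping (mode 1) the caller resets it to 0 before each HBA. */
struct vec_map { int cpu; int channel; };

static void map_io_channels(struct vec_map *map, int ncpu, int nchan,
			    int *start)
{
	int i;

	for (i = 0; i < ncpu; i++) {
		map[i].cpu = i;
		map[i].channel = (*start + i) % nchan;	/* round robin */
	}
	*start = (*start + ncpu) % nchan;	/* next HBA continues here */
}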
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); # 0 = disabled (default) # 1 = enabled # Value range is [0,1]. Default value is 0. +# +# This feature is under investigation and may be supported in the future. */ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF; -module_param(lpfc_fcp_look_ahead, uint, S_IRUGO); -MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions"); - /* # lpfc_prot_mask: i # - Bit mask of host protection capabilities used to register with the @@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery, /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count - * This value can be set to values between 64 and 256. The default value is + * This value can be set to values between 64 and 4096. The default value is * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE). + * Because of the additional overhead involved in setting up T10-DIF, + * this parameter will be limited to 128 if BlockGuard is enabled under SLI4 + * and will be limited to 512 if BlockGuard is enabled under SLI3. */ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); -LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, - LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, - "Max Protection Scatter Gather Segment Count"); +/* + * This parameter will be deprecated; the driver cannot limit the + * protection data s/g list. + */ +LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, + LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, + "Max Protection Scatter Gather Segment Count"); struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_bg_info, @@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, &dev_attr_lpfc_fcp_imax, + &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_wq_count, &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_fcp_io_channel, @@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); + lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 888666892004..094be2cad65b 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, unsigned int transfer_bytes, bytes_copied = 0; unsigned int sg_offset, dma_offset; unsigned char *dma_address, *sg_address; - struct scatterlist *sgel; LIST_HEAD(temp_list); - + struct sg_mapping_iter miter; + unsigned long flags; + unsigned int sg_flags = SG_MITER_ATOMIC; + bool sg_valid; list_splice_init(&dma_buffers->list, &temp_list); list_add(&dma_buffers->list, &temp_list); sg_offset = 0; - sgel = bsg_buffers->sg_list; + if (to_buffers) + sg_flags |= SG_MITER_FROM_SG; + else + sg_flags |= SG_MITER_TO_SG; + sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt, + sg_flags); + local_irq_save(flags); + sg_valid = sg_miter_next(&miter); list_for_each_entry(mp, &temp_list, list) { dma_offset = 0; - while (bytes_to_transfer && sgel && + while (bytes_to_transfer && sg_valid && (dma_offset < LPFC_BPL_SIZE)) { dma_address = mp->virt
+ dma_offset; if (sg_offset) { /* Continue previous partial transfer of sg */ - sg_address = sg_virt(sgel) + sg_offset; - transfer_bytes = sgel->length - sg_offset; + sg_address = miter.addr + sg_offset; + transfer_bytes = miter.length - sg_offset; } else { - sg_address = sg_virt(sgel); - transfer_bytes = sgel->length; + sg_address = miter.addr; + transfer_bytes = miter.length; } if (bytes_to_transfer < transfer_bytes) transfer_bytes = bytes_to_transfer; @@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, sg_offset += transfer_bytes; bytes_to_transfer -= transfer_bytes; bytes_copied += transfer_bytes; - if (sg_offset >= sgel->length) { + if (sg_offset >= miter.length) { sg_offset = 0; - sgel = sg_next(sgel); + sg_valid = sg_miter_next(&miter); } } } + sg_miter_stop(&miter); + local_irq_restore(flags); list_del_init(&dma_buffers->list); list_splice(&temp_list, &dma_buffers->list); return bytes_copied; @@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) cmdiocbq->context1 = dd_data; cmdiocbq->context2 = cmp; cmdiocbq->context3 = bmp; + cmdiocbq->context_un.ndlp = ndlp; dd_data->type = TYPE_IOCB; dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; @@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, ctiocb->context1 = dd_data; ctiocb->context2 = cmp; ctiocb->context3 = bmp; + ctiocb->context_un.ndlp = ndlp; ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; dd_data->type = TYPE_IOCB; @@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, evt->wait_time_stamp = jiffies; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); if (list_empty(&evt->events_to_see)) ret_val = (time_left) ? -EINTR : -ETIMEDOUT; else { @@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) evt->waiting = 1; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); evt->waiting = 0; if (list_empty(&evt->events_to_see)) { rc = (time_left) ? 
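The lpfc_bsg_copy_data() rework above replaces open-coded sg_virt() walking with the kernel's sg mapping iterator, which copes with highmem and chained scatterlists; because SG_MITER_ATOMIC uses atomic kmaps, interrupts stay disabled for the whole walk, hence the local_irq_save()/sg_miter_stop() pairing. The canonical shape of that API, reduced to a standalone sketch (copy_from_sg is an illustrative name, not an lpfc function):

#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy the first len bytes of a scatterlist into dst using the same
 * sg_miter calls the hunk above introduces. */
static size_t copy_from_sg(struct scatterlist *sgl, unsigned int nents,
			   void *dst, size_t len)
{
	struct sg_mapping_iter miter;
	unsigned long flags;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	local_irq_save(flags);		/* required for SG_MITER_ATOMIC */
	while (copied < len && sg_miter_next(&miter)) {
		size_t n = len - copied;

		if (n > miter.length)
			n = miter.length;
		memcpy(dst + copied, miter.addr, n);
		copied += n;
	}
	sg_miter_stop(&miter);		/* must precede the irq restore */
	local_irq_restore(flags);
	return copied;
}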
-EINTR : -ETIMEDOUT; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 7631893ae005..d41456e5f814 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); +void lpfc_sli4_offline_eratt(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 7bff3a19af56..ae1a07c57cae 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) if (init_utsname()->nodename[0] != '\0') lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, jiffies + + msecs_to_jiffies(1000 * 60)); } return; } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index bbed8471bf0b..3cae0a92e8bd 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -29,6 +29,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, icmd->un.elsreq64.remoteID = did; /* DID */ icmd->ulpCommand = CMD_ELS_REQUEST64_CR; - icmd->ulpTimeout = phba->fc_ratov * 2; + if (elscmd == ELS_CMD_FLOGI) + icmd->ulpTimeout = FF_DEF_RATOV * 2; + else + icmd->ulpTimeout = phba->fc_ratov * 2; } else { icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); @@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, /* Xmit ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0116 Xmit ELS command x%x to remote " - "NPORT x%x I/O tag: x%x, port state: x%x\n", + "NPORT x%x I/O tag: x%x, port state:x%x" + " fc_flag:x%x\n", elscmd, did, elsiocb->iotag, - vport->port_state); + vport->port_state, + vport->fc_flag); } else { /* Xmit ELS response <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0117 Xmit ELS response x%x to remote " - "NPORT x%x I/O tag: x%x, size: x%x\n", + "NPORT x%x I/O tag: x%x, size: x%x " + "port_state x%x fc_flag x%x\n", elscmd, ndlp->nlp_DID, elsiocb->iotag, - cmdSize); + cmdSize, vport->port_state, + vport->fc_flag); } return elsiocb; @@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PT2PT; spin_unlock_irq(shost->host_lock); + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + + /* The FC_VFI_REGISTERED flag will get clear in the cmpl + * handler for unreg_vfi, but if we don't force the + * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be + * built with the update bit set instead of just the vp bit to + * change the Nport ID. We need to have the vp set and the + * Upd cleared on topology changes. 
+ */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } /* Start discovery - this should just do CLEAR_LA */ lpfc_disc_start(vport); @@ -1030,9 +1055,19 @@ stop_rr_fcf_flogi: vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; if ((phba->sli_rev == LPFC_SLI_REV4) && (!(vport->fc_flag & FC_VFI_REGISTERED) || - (vport->fc_prevDID != vport->fc_myDID))) { - if (vport->fc_flag & FC_VFI_REGISTERED) - lpfc_sli4_unreg_all_rpis(vport); + (vport->fc_prevDID != vport->fc_myDID) || + phba->fc_topology_changed)) { + if (vport->fc_flag & FC_VFI_REGISTERED) { + if (phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + } else { + lpfc_sli4_unreg_all_rpis(vport); + } + } lpfc_issue_reg_vfi(vport); lpfc_nlp_put(ndlp); goto out; @@ -1054,10 +1089,11 @@ stop_rr_fcf_flogi: /* FLOGI completes successfully */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0101 FLOGI completes successfully " - "Data: x%x x%x x%x x%x\n", + "0101 FLOGI completes successfully, I/O tag:x%x, " + "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag, irsp->un.ulpWord[4], sp->cmn.e_d_tov, - sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); + sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, + vport->port_state, vport->fc_flag); if (vport->port_state == LPFC_FLOGI) { /* @@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct ls_rjt stat; uint32_t cmd, did; int rc; + uint32_t fc_flag = 0; + uint32_t port_state = 0; cmd = *lp++; sp = (struct serv_parm *) lp; @@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, * will be. */ vport->fc_myDID = PT2PT_LocalID; - } + } else + vport->fc_myDID = PT2PT_RemoteID; /* * The vport state should go to LPFC_FLOGI only * AFTER we issue a FLOGI, not receive one. 
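For context, receiving a FLOGI in point-to-point mode means the port-name comparison has already decided who owns which well-known address, and the hunk above stops leaving fc_myDID stale on the losing side. Reduced to its decision (a sketch; PT2PT_LocalID and PT2PT_RemoteID are the lpfc_hw.h constants, and the driver's comparison is a memcmp of the two WWPNs):

/* pt2pt DID assignment after comparing our WWPN with the sender's */
if (memcmp(&vport->fc_portname, &sp->portName,
	   sizeof(struct lpfc_name)) >= 0)
	vport->fc_myDID = PT2PT_LocalID;	/* higher WWPN */
else
	vport->fc_myDID = PT2PT_RemoteID;	/* lower WWPN */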
*/ spin_lock_irq(shost->host_lock); + fc_flag = vport->fc_flag; + port_state = vport->port_state; vport->fc_flag |= FC_PT2PT; vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->port_state = LPFC_FLOGI; spin_unlock_irq(shost->host_lock); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3311 Rcv Flogi PS x%x new PS x%x " + "fc_flag x%x new fc_flag x%x\n", + port_state, vport->port_state, + fc_flag, vport->fc_flag); /* * We temporarily set fc_myDID to make it look like we are @@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) } if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); } /** @@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* ELS command <elsCmd> received from NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0112 ELS command x%x received from NPORT x%x " - "Data: x%x\n", cmd, did, vport->port_state); + "Data: x%x x%x x%x x%x\n", + cmd, did, vport->port_state, vport->fc_flag, + vport->fc_myDID, vport->fc_prevDID); switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvPLOGI++; ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->pport->fc_flag & FC_PT2PT)) { + vport->fc_prevDID = vport->fc_myDID; + /* Our DID needs to be updated before registering + * the vfi. This is done in lpfc_rcv_plogi but + * that is called after the reg_vfi. + */ + vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3312 Remote port assigned DID x%x " + "%x\n", vport->fc_myDID, + vport->fc_prevDID); + } lpfc_send_els_event(vport, ndlp, payload); @@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; break; } + shost = lpfc_shost_from_vport(vport); if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { @@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * another NPort and the other side has initiated * the PLOGI before responding to our FLOGI. 
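Several hunks in this file now repeat one SLI4 recovery recipe whenever point-to-point discovery finds that the topology or the local DID changed: tear down the stale FCF/VFI/RPI state, clear FC_VFI_REGISTERED so the next REG_VFI mailbox is built as a fresh registration (VP bit) rather than an update (UPD bit), and re-issue it. Collected into one place for readability (a sketch; the driver open-codes this sequence at each call site rather than using a helper):

static void lpfc_reissue_vfi_after_change(struct lpfc_hba *phba,
					  struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_unregister_fcf_prep(phba);		/* drop stale VFI/VPI/RPIs */

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;	/* force VP, not UPD */
	spin_unlock_irq(shost->host_lock);

	phba->fc_topology_changed = 0;
	lpfc_issue_reg_vfi(vport);		/* register the new identity */
}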
*/ + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->fc_topology_changed || + vport->fc_myDID != vport->fc_prevDID)) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } } - shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; spin_unlock_irq(shost->host_lock); @@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_DISC_DELAYED) { spin_unlock_irq(shost->host_lock); + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "3334 Delay fc port discovery for %d seconds\n", + phba->fc_ratov); mod_timer(&vport->delayed_disc_tmo, - jiffies + HZ * phba->fc_ratov); + jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); return; } spin_unlock_irq(shost->host_lock); @@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) return; shost = lpfc_shost_from_vport(phba->pport); - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba) blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); /* Start a timer to unblock fabric iocbs after 100ms */ if (!blocked) - mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); + mod_timer(&phba->fabric_block_timer, + jiffies + msecs_to_jiffies(100)); return; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 326e05a65a73..0f6e2548f35d 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) if (!list_empty(&evtp->evt_listp)) return; + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + spin_lock_irq(&phba->hbalock); /* We need to hold the node by incrementing the reference * count until this queued work is done */ - evtp->evt_arg1 = lpfc_nlp_get(ndlp); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); @@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); - if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && - (phba->sli_rev < LPFC_SLI_REV4)) - lpfc_issue_clear_la(phba, phba->pport); return 0; } @@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba) if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); phba->hba_flag &= ~FCF_TS_INPROG; - if (phba->pport->port_state != LPFC_FLOGI) { + if (phba->pport->port_state != LPFC_FLOGI && + phba->pport->fc_flag & FC_FABRIC) { phba->hba_flag |= FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_initial_flogi(phba->pport); @@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2836 New FCF matches in-use " - "FCF (x%x)\n", - phba->fcf.current_rec.fcf_indx); + "FCF (x%x), port_state:x%x, " + "fc_flag:x%x\n", + phba->fcf.current_rec.fcf_indx, + phba->pport->port_state, + phba->pport->fc_flag); goto out; } else lpfc_printf_log(phba, KERN_ERR, LOG_FIP, @@ -2796,7 +2798,19 @@ void 
lpfc_issue_init_vpi(struct lpfc_vport *vport) { LPFC_MBOXQ_t *mboxq; - int rc; + int rc, vpi; + + if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { + vpi = lpfc_alloc_vpi(vport->phba); + if (!vpi) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_MBOX, + "3303 Failed to obtain vport vpi\n"); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + vport->vpi = vpi; + } mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { @@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto out_free_mem; } - /* If the VFI is already registered, there is nothing else to do */ + /* If the VFI is already registered, there is nothing else to do, + * unless this was a VFI update and we are in PT2PT mode; then + * we should drop through to set the port state to ready. + */ if (vport->fc_flag & FC_VFI_REGISTERED) - goto out_free_mem; + if (!(phba->sli_rev == LPFC_SLI_REV4 && + vport->fc_flag & FC_PT2PT)) + goto out_free_mem; /* The VPI is implicitly registered when the VFI is registered */ spin_lock_irq(shost->host_lock); @@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto out_free_mem; } + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x " + "alpacnt:%d LinkState:%x topology:%x\n", + vport->port_state, vport->fc_flag, vport->fc_myDID, + vport->phba->alpa_map[0], + phba->link_state, phba->fc_topology); + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { /* * For private loop or for NPort pt2pt, @@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* Use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ - lpfc_disc_start(vport); + if (vport->fc_flag & FC_PT2PT) + vport->port_state = LPFC_VPORT_READY; + else + lpfc_disc_start(vport); } else { lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); @@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) break; } + if (phba->fc_topology && + phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3314 Topology changed was 0x%x is 0x%x\n", + phba->fc_topology, + bf_get(lpfc_mbx_read_top_topology, la)); + phba->fc_topology_changed = 1; + } + phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; @@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport) tmo, vport->port_state, vport->fc_flag); } - mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); + mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_DISC_TMO; spin_unlock_irq(shost->host_lock); @@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport) uint32_t clear_la_pending; int did_changed; - if (!lpfc_is_link_up(phba)) + if (!lpfc_is_link_up(phba)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3315 Link is not up %x\n", + phba->link_state); return; + } if (phba->link_state == LPFC_CLEAR_LA) clear_la_pending = 1; @@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport) if (num_sent) return; - /* Register the VPI for SLI3, NON-NPIV only. */ + /* Register the VPI for SLI3, NPIV only.
*/ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_RSCN_MODE) && (phba->sli_rev < LPFC_SLI_REV4)) { + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_issue_clear_la(phba, vport); lpfc_issue_reg_vpi(phba, vport); return; } @@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (vport->cfg_fdmi_on == 1) lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, + jiffies + msecs_to_jiffies(1000 * 60)); /* decrement the node reference count held for this callback * function. @@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; - int i, rc; + int i = 0, rc; /* Unregister RPIs */ if (lpfc_fcf_inuse(phba)) @@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); + if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp) + lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); + lpfc_cleanup_pending_mbox(phba->pport); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(phba->pport); + lpfc_mbx_unreg_vpi(phba->pport); + shost = lpfc_shost_from_vport(phba->pport); + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; + spin_unlock_irq(shost->host_lock); + } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index e8c476031703..83700c18f468 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type { #define BG_OP_IN_CSUM_OUT_CSUM 0x5 #define BG_OP_IN_CRC_OUT_CSUM 0x6 #define BG_OP_IN_CSUM_OUT_CRC 0x7 +#define BG_OP_RAW_MODE 0x8 struct lpfc_pde5 { uint32_t word0; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1dd2f6f0a127..713a4613ec3a 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -200,6 +200,11 @@ struct lpfc_sli_intf { #define LPFC_MAX_IMAX 5000000 #define LPFC_DEF_IMAX 50000 +#define LPFC_MIN_CPU_MAP 0 +#define LPFC_MAX_CPU_MAP 2 +#define LPFC_HBA_CPU_MAP 1 +#define LPFC_DRIVER_CPU_MAP 2 /* Default */ + /* PORT_CAPABILITIES constants. 
*/ #define LPFC_MAX_SUPPORTED_PAGES 8 @@ -621,7 +626,7 @@ struct lpfc_register { #define lpfc_sliport_status_rdy_SHIFT 23 #define lpfc_sliport_status_rdy_MASK 0x1 #define lpfc_sliport_status_rdy_WORD word0 -#define MAX_IF_TYPE_2_RESETS 1000 +#define MAX_IF_TYPE_2_RESETS 6 #define LPFC_CTL_PORT_CTL_OFFSET 0x408 #define lpfc_sliport_ctrl_end_SHIFT 30 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 90b8b0515e23..cb465b253910 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <linux/firmware.h> #include <linux/miscdevice.h> +#include <linux/percpu.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -58,6 +59,9 @@ char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; +/* Used when mapping IRQ vectors in a driver centric manner */ +uint16_t lpfc_used_cpu[LPFC_MAX_CPU]; + static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_verify(struct lpfc_hba *); @@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up ring-0 (ELS) timer */ timeout = phba->fc_ratov * 2; - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); /* Set up heart beat (HB) timer */ - mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + mod_timer(&phba->hb_tmofunc, + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, @@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) psb->pCmd = NULL; psb->status = IOSTAT_SUCCESS; } - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_splice(&aborts, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); return 0; } @@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) !(phba->link_state == LPFC_HBA_ERROR) && !(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); return; } @@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) spin_lock_irq(&phba->pport->work_port_lock); - if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, - jiffies)) { + if (time_after(phba->last_completion_time + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); if (!phba->hb_outstanding) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); else mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } spin_unlock_irq(&phba->pport->work_port_lock); @@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) if (!pmboxq) { mod_timer(&phba->hb_tmofunc, jiffies + - HZ * 
LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } @@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->mbox_mem_pool); mod_timer(&phba->hb_tmofunc, jiffies + - HZ * LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } phba->skipped_hb = 0; @@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->skipped_hb = jiffies; mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } else { /* @@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) jiffies_to_msecs(jiffies - phba->last_completion_time)); mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); } } } @@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba) * This routine is called to bring a SLI4 HBA offline when HBA hardware error * other than Port Error 6 has been detected. **/ -static void +void lpfc_sli4_offline_eratt(struct lpfc_hba *phba) { lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); @@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba) struct lpfc_vport *vport; struct lpfc_vport **vports; int i; + bool vpis_cleared = false; if (!phba) return 0; @@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba) lpfc_unblock_mgmt_io(phba); return 1; } + spin_lock_irq(&phba->hbalock); + if (!phba->sli4_hba.max_cfg_param.vpi_used) + vpis_cleared = true; + spin_unlock_irq(&phba->hbalock); } else { if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); @@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba) vports[i]->fc_flag &= ~FC_OFFLINE_MODE; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + if ((vpis_cleared) && + (vports[i]->port_type != + LPFC_PHYSICAL_PORT)) + vports[i]->vpi = 0; + } spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); @@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba) struct lpfc_iocbq *io, *io_next; spin_lock_irq(&phba->hbalock); + /* Release all the lpfc_scsi_bufs maintained by this host. */ - spin_lock(&phba->scsi_buf_list_lock); - list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { + + spin_lock(&phba->scsi_buf_list_put_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, + list) { list_del(&sb->list); pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } - spin_unlock(&phba->scsi_buf_list_lock); + spin_unlock(&phba->scsi_buf_list_put_lock); + + spin_lock(&phba->scsi_buf_list_get_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, + list) { + list_del(&sb->list); + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, + sb->dma_handle); + kfree(sb); + phba->total_scsi_bufs--; + } + spin_unlock(&phba->scsi_buf_list_get_lock); /* Release all the lpfc_iocbq entries maintained by this host. 
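The scsi_buf list split above is a standard two-lock pool: the submission path allocates under scsi_buf_list_get_lock, the completion path frees under scsi_buf_list_put_lock, and only when the get list runs dry are both locks taken to splice the freed buffers back, so the two hot paths stop contending on a single spinlock. The allocation side, condensed into a sketch (simplified; the real lpfc allocator also does XRI bookkeeping):

static struct lpfc_scsi_buf *get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb;
	unsigned long flags;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, flags);
	if (list_empty(&phba->lpfc_scsi_buf_list_get)) {
		/* refill: steal everything freed since the last refill */
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice_init(&phba->lpfc_scsi_buf_list_put,
				 &phba->lpfc_scsi_buf_list_get);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	psb = list_first_entry_or_null(&phba->lpfc_scsi_buf_list_get,
				       struct lpfc_scsi_buf, list);
	if (psb)
		list_del(&psb->list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, flags);
	return psb;
}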
*/ list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { @@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) phba->sli4_hba.scsi_xri_cnt, phba->sli4_hba.scsi_xri_max); - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); + list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { /* max scsi xri shrinked below the allocated scsi buffers */ @@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->dma_handle); kfree(psb); } - spin_lock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } /* update xris associated to remaining allocated scsi buffers */ @@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->cur_iocbq.sli4_lxritag = lxri; psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); return 0; @@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) stat = 1; goto finished; } - if (time >= 30 * HZ) { + if (time >= msecs_to_jiffies(30 * 1000)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0461 Scanning longer than 30 " "seconds. Continuing initialization\n"); stat = 1; goto finished; } - if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { + if (time >= msecs_to_jiffies(15 * 1000) && + phba->link_state <= LPFC_LINK_DOWN) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0465 Link down longer than 15 " "seconds. Continuing initialization\n"); @@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) goto finished; if (vport->num_disc_nodes || vport->fc_prli_sent) goto finished; - if (vport->fc_map_cnt == 0 && time < 2 * HZ) + if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) goto finished; if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) goto finished; @@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) return -ENOMEM; /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated.
- * 2 segments are added since the IOCB needs a command and response bde. */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + /* Initialize the host templates with the configured values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ if (phba->cfg_enable_bg) { - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; - phba->cfg_sg_dma_buf_size += - phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a BDE for each. Since we have no control + * over how many protection data segments the SCSI Layer + * will hand us (i.e. there could be one for every block + * in the IO), we just allocate enough BDEs to accommodate + * our max amount, and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; + + /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a BDE for each, and a BDE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + + /* Total BDEs in BPL for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; } - /* Also reinitialize the host templates with new values. */ - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); phba->max_vpi = LPFC_MAX_VPI; /* This will be set to correct value after config_port mbox */ @@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { + struct lpfc_vector_map_info *cpup; struct lpfc_sli *psli; LPFC_MBOXQ_t *mboxq; - int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; + int rc, i, hbq_count, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; - int longs, sli_family; - int sges_per_segment; + int longs; /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); @@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; - /* With BlockGuard we can have multiple SGEs per Data Segemnt */ - sges_per_segment = 1; - if (phba->cfg_enable_bg) - sges_per_segment = 2; - /* * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
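To make the SLI3 sizing above concrete, here is a minimal standalone sketch of the two branches; the struct sizes and the LPFC_MAX_SG_SEG_CNT* limits are illustrative stand-ins, not the values from the lpfc headers:

#include <stddef.h>

/* Illustrative stand-ins for the real lpfc definitions. */
#define MAX_SG_SEG_CNT		4096	/* assumed LPFC_MAX_SG_SEG_CNT */
#define MAX_SG_SEG_CNT_DIF	 128	/* assumed LPFC_MAX_SG_SEG_CNT_DIF */
#define FCP_CMND_SZ		  32	/* stand-in for sizeof(struct fcp_cmnd) */
#define FCP_RSP_SZ		  64	/* stand-in for sizeof(struct fcp_rsp) */
#define BDE64_SZ		  12	/* stand-in for sizeof(struct ulp_bde64) */

/*
 * Mirrors the branch above: reserve a worst-case BDE pool when
 * BlockGuard is enabled, or an exact count (data segments plus the
 * 2 reserved BDEs) otherwise.
 */
static size_t sli3_sg_dma_buf_size(int enable_bg, int *sg_seg_cnt,
				   int *total_seg_cnt)
{
	if (enable_bg) {
		if (*sg_seg_cnt > MAX_SG_SEG_CNT_DIF)
			*sg_seg_cnt = MAX_SG_SEG_CNT_DIF;
		*total_seg_cnt = MAX_SG_SEG_CNT;
		return FCP_CMND_SZ + FCP_RSP_SZ +
		       (size_t)MAX_SG_SEG_CNT * BDE64_SZ;
	}
	*total_seg_cnt = *sg_seg_cnt + 2;
	return FCP_CMND_SZ + FCP_RSP_SZ +
	       (size_t)(*sg_seg_cnt + 2) * BDE64_SZ;
}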
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) sizeof(struct lpfc_sli_ring), GFP_KERNEL); if (!phba->sli.ring) return -ENOMEM; + /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * It doesn't matter what family our adapter is in; we are + * limited to 2 pages, 512 SGEs, for our SGL. + * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp + */ + max_buf_size = (2 * SLI4_PAGE_SIZE); + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) + phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; + + /* + * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. - * To insure that the scsi sgl does not cross a 4k page boundary only - * sgl sizes of must be a power of 2. */ - buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) * - sizeof(struct sli4_sge))); - - sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); - max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; - switch (sli_family) { - case LPFC_SLI_INTF_FAMILY_BE2: - case LPFC_SLI_INTF_FAMILY_BE3: - /* There is a single hint for BE - 2 pages per BPL. */ - if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_SLI_HINT1_1) - max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; - break; - case LPFC_SLI_INTF_FAMILY_LNCR_A0: - case LPFC_SLI_INTF_FAMILY_LNCR_B0: - default: - break; + + if (phba->cfg_enable_bg) { + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a SGE for each. Since we have no control + * over how many protection data segments the SCSI Layer + * will hand us (i.e. there could be one for every block + * in the IO), we just allocate enough SGEs to accommodate + * our max amount, and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + max_buf_size; + + /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a SGE for each, and a SGE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + + /* Total SGEs for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; + /* + * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need + * to post 1 page for the SGL. + */ } - for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; - dma_buf_size < max_buf_size && buf_size > dma_buf_size; - dma_buf_size = dma_buf_size << 1) - ; - if (dma_buf_size == max_buf_size) - phba->cfg_sg_seg_cnt = (dma_buf_size - - sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - - (2 * sizeof(struct sli4_sge))) / - sizeof(struct sli4_sge); - phba->cfg_sg_dma_buf_size = dma_buf_size; + /* Initialize the host templates with the updated values.
*/ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) + phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; + else + phba->cfg_sg_dma_buf_size = + SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); /* Initialize buffer queue management fields */ hbq_count = lpfc_sli_hbq_count(); @@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcp_eq_hdl; } + phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu), + GFP_KERNEL); + if (!phba->sli4_hba.cpu_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3327 Failed to allocate memory for msi-x " + "interrupt vector mapping\n"); + rc = -ENOMEM; + goto out_free_msix; + } + /* Initialize io channels for round robin */ + cpup = phba->sli4_hba.cpu_map; + rc = 0; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + cpup->channel_id = rc; + rc++; + if (rc >= phba->cfg_fcp_io_channel) + rc = 0; + } + /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. @@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) return 0; +out_free_msix: + kfree(phba->sli4_hba.msix_entries); out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); out_free_fcf_rr_bmask: @@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + /* Free memory allocated for msi-x interrupt vector to CPU mapping */ + kfree(phba->sli4_hba.cpu_map); + phba->sli4_hba.num_present_cpu = 0; + phba->sli4_hba.num_online_cpu = 0; + /* Free memory allocated for msi-x interrupt vector entries */ kfree(phba->sli4_hba.msix_entries); @@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) init_waitqueue_head(&phba->work_waitq); /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); @@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) int cfg_fcp_io_channel; uint32_t cpu; uint32_t i = 0; + uint32_t j = 0; /* @@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) /* Sanity check on HBA EQ parameters */ cfg_fcp_io_channel = phba->cfg_fcp_io_channel; - /* It doesn't make sense to have more io channels then CPUs */ - for_each_online_cpu(cpu) { - i++; + /* It doesn't make sense to have more io channels than online CPUs */ + for_each_present_cpu(cpu) { + if (cpu_online(cpu)) + i++; + j++; } + phba->sli4_hba.num_online_cpu = i; + phba->sli4_hba.num_present_cpu = j; + if (i < cfg_fcp_io_channel) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3188 Reducing IO channels to match number of " - "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); + "online CPUs: from %d to %d\n", + cfg_fcp_io_channel, i); cfg_fcp_io_channel = i; } @@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) out: /* Catch the not-ready port failure after a port reset.
*/ - if (num_resets >= MAX_IF_TYPE_2_RESETS) + if (num_resets >= MAX_IF_TYPE_2_RESETS) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3317 HBA not functional: IP Reset Failed " + "after (%d) retries, try: " + "echo fw_reset > board_mode\n", num_resets); rc = -ENODEV; + } return rc; } @@ -8209,6 +8349,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) } /** + * lpfc_find_next_cpu - Find next available CPU that matches the phys_id + * @phba: pointer to lpfc hba data structure. + * + * Find next available CPU to use for IRQ to CPU affinity. + */ +static int +lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) +{ + struct lpfc_vector_map_info *cpup; + int cpu; + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + + /* + * If we get here, we have used ALL CPUs for the specific + * phys_id. Now we need to clear out lpfc_used_cpu and start + * reusing CPUs. + */ + + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + if (lpfc_used_cpu[cpu] == phys_id) + lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; + } + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + return LPFC_VECTOR_MAP_EMPTY; +} + +/** + * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors + * @phba: pointer to lpfc hba data structure. + * @vectors: number of HBA vectors + * + * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector + * affinity across multiple physical CPUs (NUMA nodes). + * In addition, this routine will assign an IO channel for each CPU + * to use when issuing I/Os.
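+ * + * For example, with two NUMA nodes and four MSI-X vectors, vectors + * 0 and 2 are pinned to CPUs on node 0 and vectors 1 and 3 to CPUs + * on node 1, because phys_id advances after each vector is placed.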
+ */ +static int +lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) +{ + int i, idx, saved_chann, used_chann, cpu, phys_id; + int max_phys_id, num_io_channel, first_cpu; + struct lpfc_vector_map_info *cpup; +#ifdef CONFIG_X86 + struct cpuinfo_x86 *cpuinfo; +#endif + struct cpumask *mask; + uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; + + /* If there is no mapping, just return */ + if (!phba->cfg_fcp_cpu_map) + return 1; + + /* Init cpu_map array */ + memset(phba->sli4_hba.cpu_map, 0xff, + (sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu)); + + max_phys_id = 0; + phys_id = 0; + num_io_channel = 0; + first_cpu = LPFC_VECTOR_MAP_EMPTY; + + /* Update CPU map with physical id and core id of each CPU */ + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { +#ifdef CONFIG_X86 + cpuinfo = &cpu_data(cpu); + cpup->phys_id = cpuinfo->phys_proc_id; + cpup->core_id = cpuinfo->cpu_core_id; +#else + /* No distinction between CPUs for other platforms */ + cpup->phys_id = 0; + cpup->core_id = 0; +#endif + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3328 CPU physid %d coreid %d\n", + cpup->phys_id, cpup->core_id); + + if (cpup->phys_id > max_phys_id) + max_phys_id = cpup->phys_id; + cpup++; + } + + /* Now associate the HBA vectors with specific CPUs */ + for (idx = 0; idx < vectors; idx++) { + cpup = phba->sli4_hba.cpu_map; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) { + + /* Try for all phys_id's */ + for (i = 1; i < max_phys_id; i++) { + phys_id++; + if (phys_id > max_phys_id) + phys_id = 0; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) + continue; + goto found; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3329 Cannot set affinity: " + "Error mapping vector %d (%d)\n", + idx, vectors); + return 0; + } +found: + cpup += cpu; + if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP) + lpfc_used_cpu[cpu] = phys_id; + + /* Associate vector with selected CPU */ + cpup->irq = phba->sli4_hba.msix_entries[idx].vector; + + /* Associate IO channel with selected CPU */ + cpup->channel_id = idx; + num_io_channel++; + + if (first_cpu == LPFC_VECTOR_MAP_EMPTY) + first_cpu = cpu; + + /* Now affinitize to the selected CPU */ + mask = &cpup->maskbits; + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. + vector, mask); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3330 Set Affinity: CPU %d channel %d " + "irq %d (%x)\n", + cpu, cpup->channel_id, + phba->sli4_hba.msix_entries[idx].vector, i); + + /* Spread vector mapping across multiple physical CPU nodes */ + phys_id++; + if (phys_id > max_phys_id) + phys_id = 0; + } + + /* + * Finally fill in the IO channel for any remaining CPUs. + * At this point, all IO channels have been assigned to a specific + * MSIx vector, mapped to a specific CPU. + * Base the remaining IO channel assignments on IO channels already + * assigned to other CPUs on the same phys_id. + */ + for (i = 0; i <= max_phys_id; i++) { + /* + * If there are no io channels already mapped to + * this phys_id, just round robin through the io_channels. + * Setup chann[] for round robin. + */ + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + chann[idx] = idx; + + saved_chann = 0; + used_chann = 0; + + /* + * First build a list of IO channels already assigned + * to this phys_id before reassigning the same IO + * channels to the remaining CPUs.
+ */ + cpup = phba->sli4_hba.cpu_map; + cpu = first_cpu; + cpup += cpu; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; + idx++) { + if (cpup->phys_id == i) { + /* + * Save any IO channels that are + * already mapped to this phys_id. + */ + if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { + chann[saved_chann] = + cpup->channel_id; + saved_chann++; + goto out; + } + + /* See if we are using round-robin */ + if (saved_chann == 0) + saved_chann = + phba->cfg_fcp_io_channel; + + /* Associate next IO channel with CPU */ + cpup->channel_id = chann[used_chann]; + num_io_channel++; + used_chann++; + if (used_chann == saved_chann) + used_chann = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3331 Set IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } +out: + cpu++; + if (cpu >= phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpu = 0; + } else { + cpup++; + } + } + } + + if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { + cpup->channel_id = 0; + num_io_channel++; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3332 Assign IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } + cpup++; + } + } + + /* Sanity check */ + if (num_io_channel != phba->sli4_hba.num_present_cpu) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3333 Set affinity mismatch: " + "%d chann != %d cpus: %d vectors\n", + num_io_channel, phba->sli4_hba.num_present_cpu, + vectors); + + phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; + return 1; +} + + +/** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * @@ -8259,9 +8662,7 @@ enable_msix_vectors: phba->sli4_hba.msix_entries[index].vector, phba->sli4_hba.msix_entries[index].entry); - /* - * Assign MSI-X vectors to interrupt handlers - */ + /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { memset(&phba->sli4_hba.handler_name[index], 0, 16); sprintf((char *)&phba->sli4_hba.handler_name[index], @@ -8289,6 +8690,8 @@ enable_msix_vectors: phba->cfg_fcp_io_channel, vectors); phba->cfg_fcp_io_channel = vectors; } + + lpfc_sli4_set_affinity(phba, vectors); return rc; cfg_fail_out: @@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); /* Disable interrupt and pci device */ lpfc_sli_disable_intr(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); @@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) lpfc_sli4_disable_intr(phba); lpfc_sli4_queue_destroy(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = { static int __init lpfc_init(void) { + int cpu;
int error = 0; printk(LPFC_MODULE_DESC "\n"); @@ -10561,6 +10965,11 @@ lpfc_init(void) return -ENOMEM; } } + + /* Initialize in case vector mapping is needed */ + for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++) + lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; + error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index baf53e6c2bd1..2a4e5d21eab2 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -37,6 +37,7 @@ #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ #define LOG_FIP 0x00020000 /* FIP events */ #define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ +#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index a7a9fa468308..41363db7d426 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) /* Only FC supports upd bit */ if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && - (vport->fc_flag & FC_VFI_REGISTERED)) { + (vport->fc_flag & FC_VFI_REGISTERED) && + (!phba->fc_topology_changed)) { bf_set(lpfc_reg_vfi_vp, reg_vfi, 0); bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); } lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, "3134 Register VFI, mydid:x%x, fcfi:%d, " - " vfi:%d, vpi:%d, fc_pname:%x%x\n", + " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x" + " port_state:x%x topology chg:%d\n", vport->fc_myDID, phba->fcf.fcfi, phba->sli4_hba.vfi_ids[vport->vfi], phba->vpi_ids[vport->vpi], - reg_vfi->wwn[0], reg_vfi->wwn[1]); + reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag, + vport->port_state, phba->fc_topology_changed); } /** diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index cd86069a0ba8..812d0cd7c86d 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int i; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { + /* Calculate alignment */ + if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) + i = phba->cfg_sg_dma_buf_size; + else + i = SLI4_PAGE_SIZE; + phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, - phba->cfg_sg_dma_buf_size, + i, 0); - else + } else { phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, align, 0); + } + if (!phba->lpfc_scsi_dma_buf_pool) goto fail; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 82f4d3542289..31e9b92f5a9b 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, - ndlp->nlp_rpi); + ndlp->nlp_rpi, vport->port_state, + vport->fc_flag); if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) ndlp->nlp_fcp_info |= CLASS2; @@ -574,7 +576,7 @@ out: lpfc_els_rsp_reject(vport, 
stat.un.lsRjtError, cmdiocb, ndlp, NULL); /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, if ((irsp->ulpStatus) || (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; ndlp->nlp_flag &= ~NLP_NPR_ADISC; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 74b8710e1e90..8523b278ec9d 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -24,6 +24,8 @@ #include <linux/export.h> #include <linux/delay.h> #include <asm/unaligned.h> +#include <linux/crc-t10dif.h> +#include <net/checksum.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -48,7 +50,7 @@ #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 -int _dump_buf_done; +int _dump_buf_done = 1; static char *dif_op_str[] = { "PROT_NORMAL", @@ -66,6 +68,10 @@ struct scsi_dif_tuple { __be32 ref_tag; /* Target LBA or indirect LBA */ }; +#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK) +#define scsi_prot_flagged(sc, flg) sc +#endif + static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static 
void @@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_bpl; uint16_t iotag; - int bcnt; + int bcnt, bpl_size; + + bpl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp), bpl_size); for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); @@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, struct list_head *post_sblist, int sb_count) { struct lpfc_scsi_buf *psb, *psb_next; - int status; + int status, sgl_size; int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; dma_addr_t pdma_phys_bpl1; int last_xritag = NO_XRI; @@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, if (sb_count <= 0) return -EINVAL; + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + list_for_each_entry_safe(psb, psb_next, post_sblist, list) { list_del_init(&psb->list); block_cnt++; @@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, post_cnt = block_cnt; } else if (block_cnt == 1) { /* last single sgl with non-contiguous xri */ - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + if (sgl_size > SGL_PAGE_SIZE) pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; else @@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) int num_posted, rc = 0; /* get all SCSI buffers need to repost to a local list */ - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); + list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); /* post the list of scsi buffer sgls to port if available */ if (!list_empty(&post_sblist)) { @@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) IOCB_t *iocb; dma_addr_t pdma_phys_fcp_cmd; dma_addr_t pdma_phys_fcp_rsp; - dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; + dma_addr_t pdma_phys_bpl; uint16_t iotag, lxri = 0; - int bcnt, num_posted; + int bcnt, num_posted, sgl_size; LIST_HEAD(prep_sblist); LIST_HEAD(post_sblist); LIST_HEAD(scsi_sblist); + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp)); + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); if (!psb) @@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) } memset(psb->data, 0, phba->cfg_sg_dma_buf_size); + /* Page alignment is CRITICAL, double check to be sure */ + if (((unsigned long)(psb->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + break; + } + /* Allocate iotag for psb->cur_iocbq. 
*/ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { @@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; psb->fcp_bpl = psb->data; - psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + psb->fcp_cmnd = (psb->data + sgl_size); psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + sizeof(struct fcp_cmnd)); /* Initialize local short-hand pointers. */ sgl = (struct sli4_sge *)psb->fcp_bpl; pdma_phys_bpl = psb->dma_handle; - pdma_phys_fcp_cmd = - (psb->dma_handle + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size); pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); /* @@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) iocb->ulpLe = 1; iocb->ulpClass = CLASS3; psb->cur_iocbq.context1 = psb; - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; psb->dma_phys_bpl = pdma_phys_bpl; /* add the scsi buffer to a post list */ list_add_tail(&psb->list, &post_sblist); - spin_lock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt++; - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } lpfc_printf_log(phba, KERN_INFO, LOG_BG, "3021 Allocate %d out of %d requested new SCSI " @@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf * lpfc_cmd = NULL; - struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; - unsigned long iflag = 0; - - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); - if (lpfc_cmd) { - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - lpfc_cmd->prot_seg_cnt = 0; + struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; + unsigned long gflag = 0; + unsigned long pflag = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); + list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, + list); + if (!lpfc_cmd) { + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + list_remove_head(scsi_buf_list_get, lpfc_cmd, + struct lpfc_scsi_buf, list); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); return lpfc_cmd; } /** @@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf *lpfc_cmd ; - unsigned long iflag = 0; + unsigned long gflag = 0; + unsigned long pflag = 0; int found = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, - list) { + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) { if (lpfc_test_rrq_active(phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) continue; list_del(&lpfc_cmd->list); found = 1; - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - 
lpfc_cmd->prot_seg_cnt = 0; break; } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, - iflag); + if (!found) { + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, + list) { + if (lpfc_test_rrq_active( + phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_cmd->list); + found = 1; + break; + } + } + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); if (!found) return NULL; - else - return lpfc_cmd; + return lpfc_cmd; } /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA @@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } /** @@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + if (psb->exch_busy) { spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); @@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); } else { - - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } } @@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); + + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
+ */ if (datadir == DMA_FROM_DEVICE) { - bf_set(pde6_ce, pde6, checking); - bf_set(pde6_re, pde6, checking); + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); } bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); @@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_bde >= (phba->cfg_total_seg_cnt - 2)) + return num_bde + 3; + /* setup PDE5 with what we have */ pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); @@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); - bf_set(pde6_ce, pde6, checking); - bf_set(pde6_re, pde6, checking); + + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); + bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); bf_set(pde6_apptagval, pde6, 0); @@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_bde >= phba->cfg_total_seg_cnt) + return num_bde + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9065 BLKGRD:%s Invalid data segment\n", @@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge_diseed *diseed = NULL; dma_addr_t physaddr; int i = 0, num_sge = 0, status; - int datadir = sc->sc_data_direction; uint32_t reftag; unsigned blksize; uint8_t txop, rxop; @@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
+ */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + } + /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); - if (datadir == DMA_FROM_DEVICE) { - bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - bf_set(lpfc_sli4_sge_dif_re, diseed, checking); - } + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); @@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_sge >= (phba->cfg_total_seg_cnt - 2)) + return num_sge + 3; + /* setup DISEED with what we have */ diseed = (struct sli4_sge_diseed *) sgl; memset(diseed, 0, sizeof(struct sli4_sge_diseed)); @@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) { + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + + } else { + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + /* + * When in this mode, the hardware will replace + * the guard tag from the host with a + * newly generated good CRC for the wire. + * Switch to raw mode here to avoid this + * behavior. What the host sends gets put on the wire. + */ + if (txop == BG_OP_IN_CRC_OUT_CRC) { + txop = BG_OP_RAW_MODE; + rxop = BG_OP_RAW_MODE; + } + } + + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); - bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); @@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_sge >= phba->cfg_total_seg_cnt) + return num_sge + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9086 BLKGRD:%s Invalid data segment\n", @@ -2670,6 +2801,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) } /** + * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be adjusted. + * + * Adjust the data length to account for how much data + * is actually on the wire. 
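+ * For example, with Type 1 DIF and 512-byte blocks, a 64KB transfer + * covers 128 blocks, so the wire length grows by 128 * 8 = 1024 bytes + * when protection data is transferred.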
+ * + * returns the adjusted data length + **/ +static int +lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *sc = lpfc_cmd->pCmd; + int fcpdl; + + fcpdl = scsi_bufflen(sc); + + /* Check if there is protection data on the wire */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + /* Read */ + if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) + return fcpdl; + + } else { + /* Write */ + if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) + return fcpdl; + } + + /* + * If we are in DIF Type 1 mode every data block has an 8 byte + * DIF (trailer) attached to it. Must adjust FCP data length. + */ + if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI)) + fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8; + + return fcpdl; +} + +/** * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be prep'ed. @@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, uint32_t num_bde = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; - int diflen, fcpdl; - unsigned blksize; + int fcpdl; /* * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd @@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, return 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9067 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: + + /* Here we need to add a PDE5 and PDE6 to the count */ + if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) + goto err; + num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, datasegcnt); /* we should have 2 or more entries in buffer list */ if (num_bde < 2) goto err; break; + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9068 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* + * There is a minimum of 4 BPLs used for every + * protection data segment.
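+ * (each protection group carries a PDE5, a PDE6, the protection + * BDE and at least one data BDE).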
+ */ + if ((lpfc_cmd->prot_seg_cnt * 4) > + (phba->cfg_total_seg_cnt - 2)) + goto err; num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, datasegcnt, protsegcnt); /* we should have 3 or more entries in buffer list */ - if (num_bde < 3) + if ((num_bde < 3) || + (num_bde > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9022 Unexpected protection group %i\n", prot_group_type); @@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; - fcpdl = scsi_bufflen(scsi_cmnd); - - if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) { - /* - * We are in DIF Type 1 mode - * Every data block has a 8 byte DIF (trailer) - * attached to it. Must ajust FCP data length - */ - blksize = lpfc_cmd_blksize(scsi_cmnd); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - } + fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -2812,14 +2969,234 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9023 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", + "9023 Cannot setup S/G List for HBA " + "IO segs %d/%d BPL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, prot_group_type, num_bde); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } /* + * This function calculates the T10 DIF guard tag + * on the specified data using the CRC algorithm + * provided by crc_t10dif. + */ +uint16_t +lpfc_bg_crc(uint8_t *data, int count) +{ + uint16_t crc = 0; + uint16_t x; + + crc = crc_t10dif(data, count); + x = cpu_to_be16(crc); + return x; +} + +/* + * This function calculates the T10 DIF guard tag + * on the specified data using the IP checksum algorithm + * provided by ip_compute_csum. + */ +uint16_t +lpfc_bg_csum(uint8_t *data, int count) +{ + uint16_t ret; + + ret = ip_compute_csum(data, count); + return ret; +} + +/* + * This function examines the protection data to try to determine + * what type of T10-DIF error occurred.
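+ * The guard tag is recomputed in software (lpfc_bg_csum() for a DIX + * IP-checksum guard, lpfc_bg_crc() otherwise) and the guard, reference + * and application tags are compared against the expected values.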
+ */ +void +lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scatterlist *sgpe; /* s/g prot entry */ + struct scatterlist *sgde; /* s/g data entry */ + struct scsi_cmnd *cmd = lpfc_cmd->pCmd; + struct scsi_dif_tuple *src = NULL; + uint8_t *data_src = NULL; + uint16_t guard_tag, guard_type; + uint16_t start_app_tag, app_tag; + uint32_t start_ref_tag, ref_tag; + int prot, protsegcnt; + int err_type, len, data_len; + int chk_ref, chk_app, chk_guard; + uint16_t sum; + unsigned blksize; + + err_type = BGS_GUARD_ERR_MASK; + sum = 0; + guard_tag = 0; + + /* First check to see if there is protection data to examine */ + prot = scsi_get_prot_op(cmd); + if ((prot == SCSI_PROT_READ_STRIP) || + (prot == SCSI_PROT_WRITE_INSERT) || + (prot == SCSI_PROT_NORMAL)) + goto out; + + /* Currently the driver just supports ref_tag and guard_tag checking */ + chk_ref = 1; + chk_app = 0; + chk_guard = 0; + + /* Setup a ptr to the protection data provided by the SCSI host */ + sgpe = scsi_prot_sglist(cmd); + protsegcnt = lpfc_cmd->prot_seg_cnt; + + if (sgpe && protsegcnt) { + + /* + * We will only try to verify guard tag if the segment + * data length is a multiple of the blksize. + */ + sgde = scsi_sglist(cmd); + blksize = lpfc_cmd_blksize(cmd); + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + guard_type = scsi_host_get_guard(cmd->device->host); + + start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */ + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + start_app_tag = src->app_tag; + len = sgpe->length; + while (src && protsegcnt) { + while (len) { + + /* + * First check to see if a protection data + * check is valid + */ + if ((src->ref_tag == 0xffffffff) || + (src->app_tag == 0xffff)) { + start_ref_tag++; + goto skipit; + } + + /* App Tag checking */ + app_tag = src->app_tag; + if (chk_app && (app_tag != start_app_tag)) { + err_type = BGS_APPTAG_ERR_MASK; + goto out; + } + + /* Reference Tag checking */ + ref_tag = be32_to_cpu(src->ref_tag); + if (chk_ref && (ref_tag != start_ref_tag)) { + err_type = BGS_REFTAG_ERR_MASK; + goto out; + } + start_ref_tag++; + + /* Guard Tag checking */ + if (chk_guard) { + guard_tag = src->guard_tag; + if (guard_type == SHOST_DIX_GUARD_IP) + sum = lpfc_bg_csum(data_src, + blksize); + else + sum = lpfc_bg_crc(data_src, + blksize); + if ((guard_tag != sum)) { + err_type = BGS_GUARD_ERR_MASK; + goto out; + } + } +skipit: + len -= sizeof(struct scsi_dif_tuple); + if (len < 0) + len = 0; + src++; + + data_src += blksize; + data_len -= blksize; + + /* + * Are we at the end of the Data segment? + * The data segment is only used for Guard + * tag checking.
+ */ + if (chk_guard && (data_len == 0)) { + chk_guard = 0; + sgde = sg_next(sgde); + if (!sgde) + goto out; + + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + } + } + + /* Go to the next protection data segment */ + sgpe = sg_next(sgpe); + if (sgpe) { + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + len = sgpe->length; + } else { + src = NULL; + } + protsegcnt--; + } + } +out: + if (err_type == BGS_GUARD_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x1); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + phba->bg_guard_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + sum, guard_tag); + + } else if (err_type == BGS_REFTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x3); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_reftag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + ref_tag, start_ref_tag); + + } else if (err_type == BGS_APPTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x2); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_apptag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9041 BLKGRD: LBA %lx app_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + app_tag, start_app_tag); + } +} + + +/* * This function checks for BlockGuard errors detected by * the HBA. In case of errors, the ASC/ASCQ fields in the * sense buffer will be set accordingly, paired with @@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, uint32_t bgstat = bgf->bgstat; uint64_t failing_sector = 0; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd" - " 0x%x lba 0x%llx blk cnt 0x%x " - "bgstat=0x%x bghm=0x%x\n", - cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); - spin_lock(&_dump_buf_lock); if (!_dump_buf_done) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" @@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (lpfc_bgs_get_invalid_prof(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" - " BlockGuard profile. bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9072 BLKGRD: Invalid BG Profile in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } if (lpfc_bgs_get_uninit_dif_block(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " "Invalid BlockGuard DIF Block.
bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9073 BLKGRD: Invalid BG PDIF Block in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } @@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, cmd->result = DRIVER_SENSE << 24 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_guard_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9055 BLKGRD: guard_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9055 BLKGRD: Guard Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_reftag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9056 BLKGRD: ref_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9056 BLKGRD: Ref Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_apptag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9061 BLKGRD: app_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9061 BLKGRD: App Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (!ret) { /* No error was reported - problem in FW? */ - cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9057 BLKGRD: Unknown error reported!\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9057 BLKGRD: Unknown error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); + + /* Calcuate what type of error it was */ + lpfc_calc_bg_err(phba, lpfc_cmd); } - out: return ret; } @@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -3094,45 +3489,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) } /** - * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard - * @phba: The Hba for which this call is being executed. - * @lpfc_cmd: The scsi buffer which is going to be adjusted. - * - * Adjust the data length to account for how much data - * is actually on the wire. 
- * - * returns the adjusted data length - **/ -static int -lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd) -{ - struct scsi_cmnd *sc = lpfc_cmd->pCmd; - int diflen, fcpdl; - unsigned blksize; - - fcpdl = scsi_bufflen(sc); - - /* Check if there is protection data on the wire */ - if (sc->sc_data_direction == DMA_FROM_DEVICE) { - /* Read */ - if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) - return fcpdl; - - } else { - /* Write */ - if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) - return fcpdl; - } - - /* If protection data on the wire, adjust the count accordingly */ - blksize = lpfc_cmd_blksize(sc); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - return fcpdl; -} - -/** * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. @@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; - uint32_t num_bde = 0; + uint32_t num_sge = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; /* * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd - * fcp_rsp regions to the first data bde entry + * fcp_rsp regions to the first data sge entry */ if (scsi_sg_count(scsi_cmnd)) { /* @@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, sgl += 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9087 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: - num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, + /* Here we need to add a DISEED to the count */ + if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt) + goto err; + + num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, datasegcnt); + /* we should have 2 or more entries in buffer list */ - if (num_bde < 2) + if (num_sge < 2) goto err; break; - case LPFC_PG_TYPE_DIF_BUF:{ + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9088 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + /* + * There is a minimum of 3 SGEs used for every + * protection data segment.
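+ * (each protection group carries a DISEED SGE, the DIF SGE and at + * least one data SGE).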
+ */ + if ((lpfc_cmd->prot_seg_cnt * 3) > + (phba->cfg_total_seg_cnt - 2)) + goto err; - num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, + num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, datasegcnt, protsegcnt); + /* we should have 3 or more entries in buffer list */ - if (num_sge < 3) + if ((num_sge < 3) || + (num_sge > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9083 Unexpected protection group %i\n", prot_group_type); @@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); - fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9084 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", - prot_group_type, num_bde); + "9084 Cannot setup S/G List for HBA " + "IO segs %d/%d SGL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, + prot_group_type, num_sge); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } @@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, "9033 BLKGRD: rcvd %s cmd:x%x " "sector x%llx cnt %u pt %x\n", dif_op_str[scsi_get_prot_op(cmnd)], @@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, "9038 BLKGRD: rcvd PROT_NORMAL cmd: " "x%x sector x%llx cnt %u pt %x\n", cmnd->cmnd[0], @@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) /* Wait for abort to complete */ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), - (2*vport->cfg_devloss_tmo*HZ)); + msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); lpfc_cmd->waitq = NULL; if (lpfc_cmd->pCmd == cmnd) { @@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_hba *phba = vport->phba; int rc, ret = SUCCESS; + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3172 SCSI layer issued Host Reset Data:\n"); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); rc = lpfc_sli_brdrestart(phba); if (rc) ret = FAILED; - lpfc_online(phba); + rc = lpfc_online(phba); + if (rc) + ret = FAILED; lpfc_unblock_mgmt_io(phba); - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "3172 SCSI layer issued Host Reset Data: x%x\n", ret); + if (ret == FAILED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3323 Failed host reset, bring it offline\n"); + lpfc_sli4_offline_eratt(phba); + } return ret; } @@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev) } num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); if (num_to_alloc != num_allocated) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "0708 Allocation request of %d " - "command buffers did not succeed.
" - "Allocated %d buffers.\n", - num_to_alloc, num_allocated); + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0708 Allocation request of %d " + "command buffers did not succeed. " + "Allocated %d buffers.\n", + num_to_alloc, num_allocated); } if (num_allocated > 0) phba->total_scsi_bufs += num_allocated; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 35dd17eb0f27..572579f87de4 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov + 1); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (time_after(jiffies, rrq->rrq_stop_time)) @@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) return; spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov * 2); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)); list_splice_init(&phba->active_rrq_list, &rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, else rrq->send_rrq = 0; rrq->xritag = xritag; - rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); + rrq->rrq_stop_time = jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); rrq->ndlp = ndlp; rrq->nlp_DID = ndlp->nlp_DID; rrq->vport = ndlp->vport; @@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) ndlp = piocbq->context_un.ndlp; - else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) && - (piocbq->iocb_flag & LPFC_IO_LIBDFC)) + else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) ndlp = piocbq->context_un.ndlp; else ndlp = piocbq->context1; @@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, BUG(); else mod_timer(&piocb->vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov << 1)); + jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); } @@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Mailbox cmd <cmd> Cmpl <cmpl> */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x\n", pmb->vport ? 
pmb->vport->vpi : 0, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), lpfc_sli_config_mbox_opcode_get(phba, pmb), pmb->mbox_cmpl, *((uint32_t *) pmbox), pmbox->un.varWords[0], pmbox->un.varWords[1], pmbox->un.varWords[2], pmbox->un.varWords[3], @@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) pmbox->un.varWords[4], pmbox->un.varWords[5], pmbox->un.varWords[6], - pmbox->un.varWords[7]); + pmbox->un.varWords[7], + pmbox->un.varWords[8], + pmbox->un.varWords[9], + pmbox->un.varWords[10]); if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba,pmb); @@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr) lpfc_worker_wake_up(phba); else /* Restart the timer for next eratt poll */ - mod_timer(&phba->eratt_poll, jiffies + - HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); return; } @@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) list_del_init(&rsrc_blk->list); kfree(rsrc_blk); } + phba->sli4_hba.max_cfg_param.vpi_used = 0; break; case LPFC_RSC_TYPE_FCOE_XRI: kfree(phba->sli4_hba.xri_bmask); @@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); } else { kfree(phba->vpi_bmask); + phba->sli4_hba.max_cfg_param.vpi_used = 0; kfree(phba->vpi_ids); bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); kfree(phba->sli4_hba.xri_bmask); @@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) struct lpfc_sglq *sglq_entry = NULL; struct lpfc_sglq *sglq_entry_next = NULL; struct lpfc_sglq *sglq_entry_first = NULL; - int status, post_cnt = 0, num_posted = 0, block_cnt = 0; + int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; int last_xritag = NO_XRI; LIST_HEAD(prep_sgl_list); LIST_HEAD(blck_sgl_list); @@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); spin_unlock_irq(&phba->hbalock); + total_cnt = phba->sli4_hba.els_xri_cnt; list_for_each_entry_safe(sglq_entry, sglq_entry_next, &allc_sgl_list, list) { list_del_init(&sglq_entry->list); @@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) sglq_entry->sli4_xritag); list_add_tail(&sglq_entry->list, &free_sgl_list); - spin_lock_irq(&phba->hbalock); - phba->sli4_hba.els_xri_cnt--; - spin_unlock_irq(&phba->hbalock); + total_cnt--; } } } @@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) (sglq_entry_first->sli4_xritag + post_cnt - 1)); list_splice_init(&blck_sgl_list, &free_sgl_list); - spin_lock_irq(&phba->hbalock); - phba->sli4_hba.els_xri_cnt -= post_cnt; - spin_unlock_irq(&phba->hbalock); + total_cnt -= post_cnt; } /* don't reset xritag due to hole in xri block */ if (block_cnt == 0) last_xritag = NO_XRI; @@ -6097,6 +6102,8 @@ /* reset els sgl post count for next round of posting */ post_cnt = 0; } + /* update the number of XRIs posted for ELS */ + phba->sli4_hba.els_xri_cnt = total_cnt; /* free the els sgls failed to post */ lpfc_free_sgl_list(phba, &free_sgl_list); @@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov * 2)); + jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Start error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * 
LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); /* Enable PCIe device Advanced Error Reporting (AER) if configured */ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { @@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, goto out_not_finished; } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * + 1000); + mod_timer(&psli->mbox_tmo, jiffies + timeout); } /* Mailbox cmd <cmd> issue */ @@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) /* Start timer for the mbox_tmo and log some mailbox post messages */ mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); + msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " @@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, static inline uint32_t lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) { - int i; - - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) - i = smp_processor_id(); - else - i = atomic_add_return(1, &phba->fcp_qidx); + struct lpfc_vector_map_info *cpup; + int chann, cpu; - i = (i % phba->cfg_fcp_io_channel); - return i; + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) { + cpu = smp_processor_id(); + if (cpu < phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpup += cpu; + return cpup->channel_id; + } + chann = cpu; + } + chann = atomic_add_return(1, &phba->fcp_qidx); + chann = (chann % phba->cfg_fcp_io_channel); + return chann; } /** @@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if ((piocb->iocb_flag & LPFC_IO_FCP) || (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + if (unlikely(!phba->sli4_hba.fcp_wq)) + return IOCB_ERROR; if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], &wqe)) return IOCB_ERROR; } else { + if (unlikely(!phba->sli4_hba.els_wq)) + return IOCB_ERROR; if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) return IOCB_ERROR; } @@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, SLI_IOCB_RET_IOCB); if (retval == IOCB_SUCCESS) { - timeout_req = timeout * HZ; + timeout_req = msecs_to_jiffies(timeout * 1000); timeleft = wait_event_timeout(done_q, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); @@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, if (retval == MBX_BUSY || retval == MBX_SUCCESS) { wait_event_interruptible_timeout(done_q, pmboxq->mbox_flag & LPFC_MBX_WAKE, - timeout * HZ); + msecs_to_jiffies(timeout * 1000)); spin_lock_irqsave(&phba->hbalock, flag); pmboxq->context1 = NULL; @@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, } wq->db_regaddr = bar_memmap_p + db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3264 WQ[%d]: barset:x%x, offset:x%x\n", - wq->queue_id, pci_barset, db_offset); + "3264 WQ[%d]: barset:x%x, offset:x%x, " + "format:x%x\n", wq->queue_id, pci_barset, + db_offset, wq->db_format); } else { wq->db_format = LPFC_DB_LIST_FORMAT; wq->db_regaddr = phba->sli4_hba.WQDBregaddr; @@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, } hrq->db_regaddr = bar_memmap_p + 
db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n", - hrq->queue_id, pci_barset, db_offset); + "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " + "format:x%x\n", hrq->queue_id, pci_barset, + db_offset, hrq->db_format); } else { hrq->db_format = LPFC_DB_RING_FORMAT; hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; @@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s type:%s " - "Frame Data:%08x %08x %08x %08x %08x %08x\n", - rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type], + "2538 Received frame rctl:%s (x%x), type:%s (x%x), " + "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", + rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, + type_names[fc_hdr->fh_type], fc_hdr->fh_type, be32_to_cpu(header[0]), be32_to_cpu(header[1]), be32_to_cpu(header[2]), be32_to_cpu(header[3]), - be32_to_cpu(header[4]), be32_to_cpu(header[5])); + be32_to_cpu(header[4]), be32_to_cpu(header[5]), + be32_to_cpu(header[6])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index be02b59ea279..67af460184ba 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -346,11 +346,6 @@ struct lpfc_bmbx { #define SLI4_CT_VFI 2 #define SLI4_CT_FCFI 3 -#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000 -#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000 -#define LPFC_SLI4_MIN_BUF_SIZE 0x400 -#define LPFC_SLI4_MAX_BUF_SIZE 0x20000 - /* * SLI4 specific data structures */ @@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info { #define LPFC_SLI4_HANDLER_NAME_SZ 16 +/* Used for IRQ vector to CPU mapping */ +struct lpfc_vector_map_info { + uint16_t phys_id; + uint16_t core_id; + uint16_t irq; + uint16_t channel_id; + struct cpumask maskbits; +}; +#define LPFC_VECTOR_MAP_EMPTY 0xffff +#define LPFC_MAX_CPU 256 + /* SLI4 HBA data structure entries */ struct lpfc_sli4_hba { void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for @@ -573,6 +579,11 @@ struct lpfc_sli4_hba { struct lpfc_iov iov; spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ + + /* CPU to vector mapping information */ + struct lpfc_vector_map_info *cpu_map; + uint16_t num_online_cpu; + uint16_t num_present_cpu; }; enum lpfc_sge_type { diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 664cd04f7cd8..a38dc3b16969 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
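The new lpfc_vector_map_info table above, together with the reworked lpfc_sli4_scmd_to_wqidx_distr(), changes how FCP commands are spread across I/O channels: under LPFC_FCP_SCHED_BY_CPU the submitting CPU indexes directly into the per-CPU map to pick its channel, while CPUs outside the mapped range, or the round-robin policy, fall back to a shared counter taken modulo the configured channel count. A simplified user-space model of that selection (the names, sizes, and init path are illustrative, not the driver's API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdint.h>

#define NUM_PRESENT_CPU 8   /* assumed: phba->sli4_hba.num_present_cpu */
#define FCP_IO_CHANNEL  4   /* assumed: phba->cfg_fcp_io_channel */

static uint16_t channel_id[NUM_PRESENT_CPU]; /* filled at init, one per CPU */
static atomic_uint fcp_qidx;                 /* shared round-robin counter */

static unsigned int pick_channel(int sched_by_cpu, unsigned int cpu)
{
    /* Affinity path: use the per-CPU mapping when it covers this CPU. */
    if (sched_by_cpu && cpu < NUM_PRESENT_CPU)
        return channel_id[cpu];
    /* Fallback: round-robin across all configured channels. */
    return atomic_fetch_add(&fcp_qidx, 1) % FCP_IO_CHANNEL;
}

int main(void)
{
    channel_id[3] = 1;
    printf("%u %u\n", pick_channel(1, 3), pick_channel(0, 3)); /* 1 0 */
    return 0;
}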
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.38" +#define LPFC_DRIVER_VERSION "8.3.39" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 0fe188e66000..e28e431564b0 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport, } } -static int +int lpfc_alloc_vpi(struct lpfc_hba *phba) { unsigned long vpi; @@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; long timeout; + bool ns_ndlp_referenced = false; if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, @@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_debugfs_terminate(vport); + /* + * The call to fc_remove_host might release the NameServer ndlp. Since + * we might need to use the ndlp to send the DA_ID CT command, + * increment the reference for the NameServer ndlp to prevent it from + * being released. + */ + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + lpfc_nlp_get(ndlp); + ns_ndlp_referenced = true; + } + /* Remove FC host and then SCSI host with the vport */ fc_remove_host(lpfc_shost_from_vport(vport)); scsi_remove_host(lpfc_shost_from_vport(vport)); @@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_discovery_wait(vport); skip_logo: + + /* + * If the NameServer ndlp has been incremented to allow the DA_ID CT + * command to be sent, decrement the ndlp now. + */ + if (ns_ndlp_referenced) { + ndlp = lpfc_findnode_did(vport, NameServer_DID); + lpfc_nlp_put(ndlp); + } + lpfc_cleanup(vport); lpfc_sli_host_down(vport); diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 90828340acea..6b2c94eb8134 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); +int lpfc_alloc_vpi(struct lpfc_hba *phba); /* * queuecommand VPORT-specific return codes. Specified in the host byte code. 
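A pattern repeated across the lpfc hunks above is replacing raw tick arithmetic (jiffies + HZ * seconds) with jiffies + msecs_to_jiffies(milliseconds). The two are numerically equivalent for whole seconds, but going through the helper keeps the unit explicit and lets the conversion round correctly for any HZ. A rough user-space emulation of the round-up behaviour (simplified; the real kernel helper also special-cases common HZ values and clamps overlarge inputs):

#include <stdio.h>

#define HZ 250 /* assumed tick rate for the example */

/* Round up so a requested timeout is never shortened by integer division. */
static unsigned long msecs_to_jiffies_emul(unsigned long ms)
{
    return (ms * HZ + 999) / 1000;
}

int main(void)
{
    unsigned long devloss_tmo = 30; /* seconds, as in lpfc_abort_handler() */
    /* 2 * 30 s expressed as milliseconds first: 60000 ms -> 15000 ticks. */
    printf("%lu\n", msecs_to_jiffies_emul(2 * devloss_tmo * 1000));
    return 0;
}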
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7c90d57b867e..3a9ddae86f1f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) printk(KERN_ERR "megaraid_sas: timed out while" "waiting for HBA to recover\n"); error = -ENODEV; - goto out_kfree_ioc; + goto out_up; } spin_unlock_irqrestore(&instance->hba_lock, flags); error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); + out_up: up(&instance->ioctl_sem); out_kfree_ioc: diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 74550922ad55..7b7381d7671f 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) } for (i = 0; i < MVS_MAX_DEVICES; i++) { mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; - mvi->devices[i].dev_type = NO_DEVICE; + mvi->devices[i].dev_type = SAS_PHY_UNUSED; mvi->devices[i].device_id = i; mvi->devices[i].dev_status = MVS_DEV_NORMAL; init_timer(&mvi->devices[i].timer); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 532110f4562a..c9e244984e30 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, return 0; } -#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) +#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED))) static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, struct mvs_tmf_task *tmf, int *pass) { @@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf * libsas will use dev->port, should * not call task_done for sata */ - if (dev->dev_type != SATA_DEV) + if (dev->dev_type != SAS_SATA_DEV) task->task_done(task); return rc; } @@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) phy->identify.device_type = phy->att_dev_info & PORT_DEV_TYPE_MASK; - if (phy->identify.device_type == SAS_END_DEV) + if (phy->identify.device_type == SAS_END_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; - else if (phy->identify.device_type != NO_DEVICE) + else if (phy->identify.device_type != SAS_PHY_UNUSED) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; if (oob_done) @@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) { u32 dev; for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { - if (mvi->devices[dev].dev_type == NO_DEVICE) { + if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) { mvi->devices[dev].device_id = dev; return &mvi->devices[dev]; } @@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev) u32 id = mvi_dev->device_id; memset(mvi_dev, 0, sizeof(*mvi_dev)); mvi_dev->device_id = id; - mvi_dev->dev_type = NO_DEVICE; + mvi_dev->dev_type = SAS_PHY_UNUSED; mvi_dev->dev_status = MVS_DEV_NORMAL; mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; } @@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) { int rc; struct sas_phy *phy = sas_get_local_phy(dev); - int reset_type = (dev->dev_type == SATA_DEV || + int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 
0 : 1; rc = sas_phy_reset(phy, reset_type); sas_put_local_phy(phy); @@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task) } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { - if (SATA_DEV == dev->dev_type) { + if (SAS_SATA_DEV == dev->dev_type) { struct mvs_slot_info *slot = task->lldd_task; u32 slot_idx = (u32)(slot - mvi->slot_info); mv_dprintk("mvs_abort_task() mvi=%p task=%p " diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 9f3cc13a5ce7..60e2fb7f2dca 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch; extern struct kmem_cache *mvs_task_list_cache; #define DEV_IS_EXPANDER(type) \ - ((type == EDGE_DEV) || (type == FANOUT_DEV)) + ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) #define bit(n) ((u64)1 << n) @@ -241,7 +241,7 @@ struct mvs_phy { struct mvs_device { struct list_head dev_entry; - enum sas_dev_type dev_type; + enum sas_device_type dev_type; struct mvs_info *mvi_info; struct domain_device *sas_device; struct timer_list timer; diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile index 52f04296171c..ce4cd87c7c66 100644 --- a/drivers/scsi/pm8001/Makefile +++ b/drivers/scsi/pm8001/Makefile @@ -4,9 +4,10 @@ # Copyright (C) 2008-2009 USI Co., Ltd. -obj-$(CONFIG_SCSI_PM8001) += pm8001.o -pm8001-y += pm8001_init.o \ +obj-$(CONFIG_SCSI_PM8001) += pm80xx.o +pm80xx-y += pm8001_init.o \ pm8001_sas.o \ pm8001_ctl.o \ - pm8001_hwi.o + pm8001_hwi.o \ + pm80xx_hwi.o diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 45bc197bc22f..d99f41c2ca13 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. 
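From this point on, nearly every pm8001_ctl.c attribute handler gains the same shape: the driver now covers both the original SPC (chip_8001) and the newer 8008/8009/8018/8019 (SPCv/SPCve) controllers, whose MPI configuration tables are laid out differently, so each sysfs show routine selects the pm8001_tbl or pm80xx_tbl view of main_cfg_tbl by chip id. A compact sketch of that pattern with a tagged union (the field layout here is illustrative, not the real table definitions):

#include <stdio.h>
#include <stdint.h>

enum chip_flavors { chip_8001, chip_8008, chip_8009, chip_8018, chip_8019 };

/* Two per-generation views of the controller's main configuration table. */
struct pm8001_main_cfg { uint32_t interface_rev; uint32_t firmware_rev; };
struct pm80xx_main_cfg { uint32_t interface_rev; uint32_t firmware_rev; };

union main_cfg_table {
    struct pm8001_main_cfg pm8001_tbl;
    struct pm80xx_main_cfg pm80xx_tbl;
};

static uint32_t interface_rev(enum chip_flavors chip,
                              const union main_cfg_table *tbl)
{
    /* Only the legacy SPC uses the old layout; the 80xx parts share the new one. */
    if (chip == chip_8001)
        return tbl->pm8001_tbl.interface_rev;
    return tbl->pm80xx_tbl.interface_rev;
}

int main(void)
{
    union main_cfg_table t = { .pm8001_tbl = { .interface_rev = 2 } };
    printf("%u\n", interface_rev(chip_8001, &t)); /* prints 2 */
    return 0;
}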
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", - pm8001_ha->main_cfg_tbl.interface_rev); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev); + } else { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev); + } } static DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); @@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev)); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev)); + } else { + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev)); + } } static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); /** @@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", - pm8001_ha->main_cfg_tbl.max_out_io); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io); + } else { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io); + } } static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); /** @@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%04d\n", - (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16)); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16) + ); + } else { + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16) + ); + } } static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); /** @@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%04d\n", - pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF + ); + } else { + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 
0x0000FFFF + ); + } } static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); @@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev, struct Scsi_Host *shost = class_to_shost(cdev); struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25; + /* fe000000 means supports SAS2.1 */ + if (pm8001_ha->chip_id == chip_8001) + mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag & + 0xfe000000)>>25; + else + /* fe000000 means supports SAS2.1 */ + mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag & + 0xfe000000)>>25; return show_sas_spec_support_status(mode, buf); } static DEVICE_ATTR(sas_spec_support, S_IRUGO, @@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) goto out; } payload = (struct pm8001_ioctl_payload *)ioctlbuffer; - memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data, + memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data, pm8001_ha->fw_image->size); payload->length = pm8001_ha->fw_image->size; payload->id = 0; + payload->minor_function = 0x1; pm8001_ha->nvmd_completion = &completion; ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); wait_for_completion(&completion); @@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) payload->length = 1024*16; payload->id = 0; fwControl = - (struct fw_control_info *)payload->func_specific; + (struct fw_control_info *)&payload->func_specific; fwControl->len = IOCTL_BUF_SIZE; /* IN */ fwControl->size = partitionSize + HEADER_LEN;/* IN */ fwControl->retcode = 0;/* OUT */ diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index c3d20c8d4abe..479c5a7a863a 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -43,9 +43,12 @@ enum chip_flavors { chip_8001, + chip_8008, + chip_8009, + chip_8018, + chip_8019 }; -#define USI_MAX_MEMCNT 9 -#define PM8001_MAX_DMA_SG SG_ALL + enum phy_speed { PHY_SPEED_15 = 0x01, PHY_SPEED_30 = 0x02, @@ -69,23 +72,34 @@ enum port_type { #define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */ #define PM8001_MAX_INB_NUM 1 #define PM8001_MAX_OUTB_NUM 1 +#define PM8001_MAX_SPCV_INB_NUM 1 +#define PM8001_MAX_SPCV_OUTB_NUM 4 #define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */ +/* Inbound/Outbound queue size */ +#define IOMB_SIZE_SPC 64 +#define IOMB_SIZE_SPCV 128 + /* unchangeable hardware details */ -#define PM8001_MAX_PHYS 8 /* max. possible phys */ -#define PM8001_MAX_PORTS 8 /* max. possible ports */ -#define PM8001_MAX_DEVICES 1024 /* max supported device */ +#define PM8001_MAX_PHYS 16 /* max. possible phys */ +#define PM8001_MAX_PORTS 16 /* max. 
possible ports */ +#define PM8001_MAX_DEVICES 2048 /* max supported device */ +#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ +#define USI_MAX_MEMCNT_BASE 5 +#define IB (USI_MAX_MEMCNT_BASE + 1) +#define CI (IB + PM8001_MAX_SPCV_INB_NUM) +#define OB (CI + PM8001_MAX_SPCV_INB_NUM) +#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM) +#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM) +#define PM8001_MAX_DMA_SG SG_ALL enum memory_region_num { AAP1 = 0x0, /* application acceleration processor */ IOP, /* IO processor */ - CI, /* consumer index */ - PI, /* producer index */ - IB, /* inbound queue */ - OB, /* outbound queue */ NVMD, /* NVM device */ DEV_MEM, /* memory for devices */ CCB_MEM, /* memory for command control block */ + FW_FLASH /* memory for fw flash update */ }; #define PM8001_EVENT_LOG_SIZE (128 * 1024) diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index b8dd05074abb..69dd49c05f1e 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -50,32 +50,39 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->main_cfg_tbl_addr; - pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00); - pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04); - pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08); - pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C); - pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10); - pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14); - pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18); - pm8001_ha->main_cfg_tbl.inbound_queue_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.signature = + pm8001_mr32(address, 0x00); + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev = + pm8001_mr32(address, 0x04); + pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev = + pm8001_mr32(address, 0x08); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io = + pm8001_mr32(address, 0x0C); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl = + pm8001_mr32(address, 0x10); + pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag = + pm8001_mr32(address, 0x14); + pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset = + pm8001_mr32(address, 0x18); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset = pm8001_mr32(address, MAIN_IBQ_OFFSET); - pm8001_ha->main_cfg_tbl.outbound_queue_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset = pm8001_mr32(address, MAIN_OBQ_OFFSET); - pm8001_ha->main_cfg_tbl.hda_mode_flag = + pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag = pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); /* read analog Setting offset from the configuration table */ - pm8001_ha->main_cfg_tbl.anolog_setup_table_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset = pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); /* read Error Dump Offset and Length */ - pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); - pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); - pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); - pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 = 
pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); } @@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->general_stat_tbl_addr; - pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00); - pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04); - pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08); - pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C); - pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10); - pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14); - pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18); - pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C); - pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20); - pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24); - pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28); - pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C); - pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30); - pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34); - pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38); - pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C); - pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40); - pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44); - pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48); - pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C); - pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50); - pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54); - pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58); - pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C); - pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60); + pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate = + pm8001_mr32(address, 0x00); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 = + pm8001_mr32(address, 0x04); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 = + pm8001_mr32(address, 0x08); + pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt = + pm8001_mr32(address, 0x0C); + pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt = + pm8001_mr32(address, 0x10); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd = + pm8001_mr32(address, 0x14); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] = + pm8001_mr32(address, 0x18); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] = + pm8001_mr32(address, 0x1C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] = + pm8001_mr32(address, 0x20); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] = + pm8001_mr32(address, 0x24); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] = + pm8001_mr32(address, 0x28); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] = + pm8001_mr32(address, 0x2C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] = + pm8001_mr32(address, 0x30); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] = + pm8001_mr32(address, 0x34); + pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val = + pm8001_mr32(address, 0x38); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] = + pm8001_mr32(address, 0x3C); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] = + pm8001_mr32(address, 0x40); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] = + pm8001_mr32(address, 0x44); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] = + pm8001_mr32(address, 0x48); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] = + pm8001_mr32(address, 0x4C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] = + pm8001_mr32(address, 0x50); + 
pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] = + pm8001_mr32(address, 0x54); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] = + pm8001_mr32(address, 0x58); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] = + pm8001_mr32(address, 0x5C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] = + pm8001_mr32(address, 0x60); } /** @@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) */ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { - int inbQ_num = 1; int i; void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; - for (i = 0; i < inbQ_num; i++) { + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { u32 offset = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); @@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) */ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { - int outbQ_num = 1; int i; void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; - for (i = 0; i < outbQ_num; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { u32 offset = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); @@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) */ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) { - int qn = 1; int i; u32 offsetib, offsetob; void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; - pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0; - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0; - - pm8001_ha->main_cfg_tbl.upper_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0; + + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; - pm8001_ha->main_cfg_tbl.lower_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; - pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.event_log_option = 0x01; - pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size = + 
PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr = pm8001_ha->memoryMap.region[IOP].phys_addr_hi; - pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr = pm8001_ha->memoryMap.region[IOP].phys_addr_lo; - pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01; - pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01; - for (i = 0; i < qn; i++) { + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); pm8001_ha->inbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[IB].phys_addr_hi; + pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[IB].phys_addr_lo; + pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; pm8001_ha->inbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[IB].total_len; + pm8001_ha->memoryMap.region[IB + i].total_len; pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = - pm8001_ha->memoryMap.region[CI].phys_addr_hi; + pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = - pm8001_ha->memoryMap.region[CI].phys_addr_lo; + pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].ci_virt = - pm8001_ha->memoryMap.region[CI].virt_ptr; + pm8001_ha->memoryMap.region[CI + i].virt_ptr; offsetib = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(addressib, @@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; } - for (i = 0; i < qn; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { pm8001_ha->outbnd_q_tbl[i].element_size_cnt = PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); pm8001_ha->outbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[OB].phys_addr_hi; + pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[OB].phys_addr_lo; + pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; pm8001_ha->outbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[OB].total_len; + pm8001_ha->memoryMap.region[OB + i].total_len; pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = - pm8001_ha->memoryMap.region[PI].phys_addr_hi; + pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = - pm8001_ha->memoryMap.region[PI].phys_addr_lo; + pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = - 0 | (10 << 16) | (0 << 24); + 0 | (10 << 16) | (i << 24); pm8001_ha->outbnd_q_tbl[i].pi_virt = - pm8001_ha->memoryMap.region[PI].virt_ptr; + pm8001_ha->memoryMap.region[PI + i].virt_ptr; offsetob = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = 
get_pci_bar_index(pm8001_mr32(addressob, @@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->main_cfg_tbl_addr; pm8001_mw32(address, 0x24, - pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd); pm8001_mw32(address, 0x28, - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3); pm8001_mw32(address, 0x2C, - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7); pm8001_mw32(address, 0x30, - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3); pm8001_mw32(address, 0x34, - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7); pm8001_mw32(address, 0x38, - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid0_3); pm8001_mw32(address, 0x3C, - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid4_7); pm8001_mw32(address, 0x40, - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid0_3); pm8001_mw32(address, 0x44, - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid4_7); pm8001_mw32(address, 0x48, - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_smp_event_pid0_3); pm8001_mw32(address, 0x4C, - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. 
+ outbound_tgt_smp_event_pid4_7); pm8001_mw32(address, 0x50, - pm8001_ha->main_cfg_tbl.upper_event_log_addr); + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr); pm8001_mw32(address, 0x54, - pm8001_ha->main_cfg_tbl.lower_event_log_addr); - pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size); - pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option); + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr); + pm8001_mw32(address, 0x58, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size); + pm8001_mw32(address, 0x5C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option); pm8001_mw32(address, 0x60, - pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr); + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr); pm8001_mw32(address, 0x64, - pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr); - pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size); + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr); + pm8001_mw32(address, 0x68, + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size); pm8001_mw32(address, 0x6C, - pm8001_ha->main_cfg_tbl.iop_event_log_option); + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option); pm8001_mw32(address, 0x70, - pm8001_ha->main_cfg_tbl.fatal_err_interrupt); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt); } /** @@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) */ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) { + u8 i = 0; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + /* 8081 controllers need BAR shift to access MPI space + * as this is shared with BIOS data */ + if (deviceid == 0x8081) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE)); + return -1; + } + } /* check the firmware status */ if (-1 == check_fw_ready(pm8001_ha)) { PM8001_FAIL_DBG(pm8001_ha, @@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) read_outbnd_queue_table(pm8001_ha); /* update main config table, inbound table and outbound table */ update_main_config_table(pm8001_ha); - update_inbnd_queue_table(pm8001_ha, 0); - update_outbnd_queue_table(pm8001_ha, 0); - mpi_set_phys_g3_with_ssc(pm8001_ha, 0); - /* 7->130ms, 34->500ms, 119->1.5s */ - mpi_set_open_retry_interval_reg(pm8001_ha, 119); + for (i = 0; i < PM8001_MAX_INB_NUM; i++) + update_inbnd_queue_table(pm8001_ha, i); + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) + update_outbnd_queue_table(pm8001_ha, i); + /* 8081 controllers do not require these operations */ + if (deviceid != 0x8081) { + mpi_set_phys_g3_with_ssc(pm8001_ha, 0); + /* 7->130ms, 34->500ms, 119->1.5s */ + mpi_set_open_retry_interval_reg(pm8001_ha, 119); + } /* notify firmware update finished and check initialization status */ if (0 == mpi_init_check(pm8001_ha)) { PM8001_INIT_DBG(pm8001_ha, @@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) u32 max_wait_count; u32 value; u32 gst_len_mpistate; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + if (deviceid == 0x8081) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE)); + return -1; + } + } init_pci_device_addresses(pm8001_ha); /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the table is stopped */ @@ -740,14 +810,14 @@ static u32 
soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha) * pm8001_chip_soft_rst - soft reset the PM8001 chip, so as to clear all * the FW register status back to the original status. * @pm8001_ha: our hba card information - * @signature: signature in host scratch pad0 register. */ static int -pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature) +pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) { u32 regVal, toggleVal; u32 max_wait_count; u32 regVal1, regVal2, regVal3; + u32 signature = 0x252acbcd; /* for host scratch pad0 */ unsigned long flags; /* step1: Check FW is ready for soft reset */ @@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) * pm8001_chip_iounmap - unmap the regions which were mapped when initialized. * @pm8001_ha: our hba card information */ -static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) { s8 bar, logical = 0; for (bar = 0; bar < 6; bar++) { @@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha, * @pm8001_ha: our hba card information */ static void -pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); @@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) * @pm8001_ha: our hba card information */ static void -pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); @@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) } /** - * mpi_msg_free_get- get the free message buffer for transfer inbound queue. + * pm8001_mpi_msg_free_get - get the free message buffer for transfer + * inbound queue. * @circularQ: the inbound queue we want to transfer to HBA. * @messageSize: the message size of this transfer, normally it is 64 bytes * @messagePtr: the pointer to message. */ -static int mpi_msg_free_get(struct inbound_queue_table *circularQ, +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, u16 messageSize, void **messagePtr) { u32 offset, consumer_index; @@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, u8 bcCount = 1; /* only support single buffer */ /* Checks if the requested message size can be allocated in this queue*/ - if (messageSize > 64) { + if (messageSize > IOMB_SIZE_SPCV) { *messagePtr = NULL; return -1; } @@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, return -1; } /* get memory IOMB buffer address */ - offset = circularQ->producer_idx * 64; + offset = circularQ->producer_idx * messageSize; /* increment to next bcCount element */ circularQ->producer_idx = (circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE; @@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, } /** - * mpi_build_cmd- build the message queue for transfer, update the PI to FW - * to tell the fw to get this message from IOMB. + * pm8001_mpi_build_cmd - build the message queue for transfer, update the PI to + * FW to tell the fw to get this message from IOMB. * @pm8001_ha: our hba card information * @circularQ: the inbound queue we want to transfer to HBA. * @opCode: the operation code represents commands which LLDD and fw recognized. 
* @payload: the command payload of each operation command. */ -static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, struct inbound_queue_table *circularQ, - u32 opCode, void *payload) + u32 opCode, void *payload, u32 responseQueue) { u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; - u32 responseQueue = 0; void *pMessage; - if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { + if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size, + &pMessage) < 0) { PM8001_IO_DBG(pm8001_ha, pm8001_printk("No free mpi buffer\n")); return -1; } BUG_ON(!payload); /*Copy to the payload*/ - memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr))); + memcpy(pMessage, payload, (pm8001_ha->iomb_size - + sizeof(struct mpi_msg_hdr))); /*Build the header*/ Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) @@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, circularQ->pi_offset, circularQ->producer_idx); PM8001_IO_DBG(pm8001_ha, - pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx, - circularQ->consumer_index)); + pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n", + responseQueue, opCode, circularQ->producer_idx, + circularQ->consumer_index)); return 0; } -static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, struct outbound_queue_table *circularQ, u8 bc) { u32 producer_index; @@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + - circularQ->consumer_idx * 64); + circularQ->consumer_idx * pm8001_ha->iomb_size); if (pOutBoundMsgHeader != msgHeader) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("consumer_idx = %d msgHeader = %p\n", @@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, } /** - * mpi_msg_consume- get the MPI message from outbound queue message table. + * pm8001_mpi_msg_consume- get the MPI message from outbound queue + * message table. * @pm8001_ha: our hba card information * @circularQ: the outbound queue table. * @messagePtr1: the message contents of this outbound message. * @pBC: the message size. 
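The message-buffer changes above reduce to one parameterisation: the inbound ring holds PM8001_MPI_QUEUE entries whose size is now pm8001_ha->iomb_size (IOMB_SIZE_SPC, 64 bytes, on the SPC; IOMB_SIZE_SPCV, 128 bytes, on the SPCv/ve) instead of a hard-coded 64, so a slot's byte offset is producer_idx * iomb_size and the producer index wraps modulo the queue depth. A stand-alone model of the producer-side arithmetic (the full-ring test against the consumer index is simplified relative to the driver's):

#include <stdio.h>
#include <stdint.h>

#define MPI_QUEUE 1024 /* PM8001_MPI_QUEUE: entries per ring */

struct inbound_queue {
    uint32_t producer_idx;
    uint32_t consumer_idx;
    uint32_t iomb_size;  /* 64 for SPC, 128 for SPCv/ve */
};

/* Reserve one IOMB slot; return its byte offset, or -1 if the ring is full. */
static long msg_free_get(struct inbound_queue *q)
{
    if (((q->producer_idx + 1) % MPI_QUEUE) == q->consumer_idx)
        return -1; /* producer would catch up with the consumer */
    long offset = (long)q->producer_idx * q->iomb_size;
    q->producer_idx = (q->producer_idx + 1) % MPI_QUEUE;
    return offset;
}

int main(void)
{
    struct inbound_queue q = { .producer_idx = 1023, .consumer_idx = 1,
                               .iomb_size = 128 };
    printf("%ld\n", msg_free_get(&q)); /* 1023 * 128 = 130944; index wraps to 0 */
    return 0;
}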
*/ -static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, struct outbound_queue_table *circularQ, void **messagePtr1, u8 *pBC) { @@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, /*Get the pointer to the circular queue buffer element*/ msgHeader = (struct mpi_msg_hdr *) (circularQ->base_virt + - circularQ->consumer_idx * 64); + circularQ->consumer_idx * pm8001_ha->iomb_size); /* read header */ header_tmp = pm8001_read_32(msgHeader); msgHeader_tmp = cpu_to_le32(header_tmp); @@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, return MPI_IO_STATUS_BUSY; } -static void pm8001_work_fn(struct work_struct *work) +void pm8001_work_fn(struct work_struct *work) { struct pm8001_work *pw = container_of(work, struct pm8001_work, work); struct pm8001_device *pm8001_dev; @@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work) pm8001_dev = pw->data; /* Most stash device structure */ if ((pm8001_dev == NULL) || ((pw->handler != IO_XFER_ERROR_BREAK) - && (pm8001_dev->dev_type == NO_DEVICE))) { + && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) { kfree(pw); return; } @@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work) } break; case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: dev = pm8001_dev->sas_device; - pm8001_I_T_nexus_reset(dev); + pm8001_I_T_nexus_event_handler(dev); break; case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: dev = pm8001_dev->sas_device; @@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work) kfree(pw); } -static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, int handler) { struct pm8001_work *pw; @@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, return ret; } +static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct task_abort_req task_abort; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_ABORT; + int ret; + + if (!pm8001_ha_dev) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n")); + return; + } + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot " + "allocate task\n")); + return; + } + + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return; + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&task_abort, 0, sizeof(task_abort)); + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + task_abort.tag = cpu_to_le32(ccb_tag); + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + +} + +static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + struct sata_start_req sata_cmd; + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct host_to_dev_fis fis; + struct domain_device *dev; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate task !!!\n")); + 
return; + } + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate tag !!!\n")); + return; + } + + /* allocate domain device by ourselves as libsas + * is not going to provide any + */ + dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); + if (!dev) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Domain device cannot be allocated\n")); + sas_free_task(task); + return; + } else { + task->dev = dev; + task->dev->lldd_dev = pm8001_ha_dev; + } + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; + pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* construct read log FIS */ + memset(&fis, 0, sizeof(struct host_to_dev_fis)); + fis.fis_type = 0x27; + fis.flags = 0x80; + fis.command = ATA_CMD_READ_LOG_EXT; + fis.lbal = 0x10; + fis.sector_count = 0x1; + + sata_cmd.tag = cpu_to_le32(ccb_tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9)); + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); + + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + +} + /** * mpi_ssp_completion- process the event that FW response to the SSP request. * @pm8001_ha: our hba card information @@ -1867,7 +2058,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) break; } PM8001_IO_DBG(pm8001_ha, - pm8001_printk("scsi_status = %x \n ", + pm8001_printk("scsi_status = %x\n ", psspPayload->ssp_resp_iu.status)); spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; @@ -2096,16 +2287,44 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) status = le32_to_cpu(psataPayload->status); tag = le32_to_cpu(psataPayload->tag); + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("tag null\n")); + return; + } ccb = &pm8001_ha->ccb_info[tag]; param = le32_to_cpu(psataPayload->param); - t = ccb->task; + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ccb null\n")); + return; + } + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task null\n")); + return; + } + + if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) + && unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + ts = &t->task_status; - pm8001_dev = ccb->device; - if (status) + if (!ts) { PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("sata IO status 0x%x\n", status)); - if (unlikely(!t || !t->lldd_task || !t->dev)) + pm8001_printk("ts null\n")); return; + } switch (status) { case IO_SUCCESS: @@ -2113,6 +2332,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_GOOD; + /* check if response is for SEND READ LOG */ + if (pm8001_dev && + (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { + /* set new bit for abort_all */ + pm8001_dev->id |= NCQ_ABORT_ALL_FLAG; + /* clear bit for read log */ + pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF; + pm8001_send_abort_all(pm8001_ha, pm8001_dev); + /* Free the tag */ + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + return; + } } else { u8 len; ts->resp = 
SAS_TASK_COMPLETE; @@ -2424,6 +2656,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) unsigned long flags; ccb = &pm8001_ha->ccb_info[tag]; + + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("No CCB !!!. returning\n")); + } + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SATA EVENT 0x%x\n", event)); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension */ + if (pm8001_dev) + pm8001_send_read_log(pm8001_ha, pm8001_dev); + return; + } + + ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; pm8001_dev = ccb->device; if (event) @@ -2432,9 +2687,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; - PM8001_IO_DBG(pm8001_ha, - pm8001_printk("port_id = %x,device_id = %x\n", - port_id, dev_id)); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, dev_id, tag, event)); switch (event) { case IO_OVERFLOW: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); @@ -2822,8 +3077,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) } } -static void -mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) { struct set_dev_state_resp *pPayload = (struct set_dev_state_resp *)(piomb + 4); @@ -2843,8 +3098,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static void -mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct get_nvm_data_resp *pPayload = (struct get_nvm_data_resp *)(piomb + 4); @@ -2863,8 +3117,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static void -mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void +pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct fw_control_ex *fw_control_context; struct get_nvm_data_resp *pPayload = @@ -2925,7 +3179,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct local_phy_ctl_resp *pPayload = (struct local_phy_ctl_resp *)(piomb + 4); @@ -2954,7 +3208,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) * while receive a broadcast(change) primitive just tell the sas * layer to discover the changed domain rather than the whole domain. 
*/ -static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) { struct pm8001_phy *phy = &pm8001_ha->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; @@ -2988,7 +3242,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) } /* Get the link rate speed */ -static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) { struct sas_phy *sas_phy = phy->sas_phy.phy; @@ -3025,7 +3279,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame * buffer. */ -static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr) { if (phy->sas_phy.frame_rcvd[0] == 0x34 @@ -3067,7 +3321,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, ((phyId & 0x0F) << 4) | (port_id & 0x0F)); payload.param0 = cpu_to_le32(param0); payload.param1 = cpu_to_le32(param1); - mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); } static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, @@ -3112,19 +3366,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, PHY_NOTIFY_ENABLE_SPINUP); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; case SAS_EDGE_EXPANDER_DEVICE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("expander device.\n")); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; case SAS_FANOUT_EXPANDER_DEVICE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("fanout expander device.\n")); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; default: PM8001_MSG_DBG(pm8001_ha, @@ -3179,7 +3433,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) " phy id = %d\n", port_id, phy_id)); port->port_state = portstate; port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); phy->phy_type |= PORT_TYPE_SATA; phy->phy_attached = 1; phy->sas_phy.oob_mode = SATA_OOB_MODE; @@ -3189,7 +3443,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) sizeof(struct dev_to_host_fis)); phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; - phy->identify.device_type = SATA_DEV; + phy->identify.device_type = SAS_SATA_DEV; pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); pm8001_bytes_dmaed(pm8001_ha, phy_id); @@ -3260,7 +3514,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_reg_resp -process register device ID response. + * pm8001_mpi_reg_resp -process register device ID response. * @pm8001_ha: our hba card information * @piomb: IO message buffer * @@ -3269,7 +3523,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) * has assigned, from now,inter-communication with FW is no longer using the * SAS address, use device ID which FW assigned. 
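pm8001_get_attached_sas_addr() keys off the first byte of the received frame: 0x34 is an ATA device-to-host register FIS, anything else is treated as a SAS IDENTIFY address frame. A sketch of that decode, with both the SATA address-derivation scheme and the IDENTIFY offset marked as assumptions:

#include <stdint.h>

static void get_attached_addr_sketch(const uint8_t *frame,
                                     uint64_t own_addr, uint8_t phy_id,
                                     uint64_t *attached)
{
        if (frame[0] == 0x34) {
                /* A direct-attached SATA drive carries no SAS address,
                 * so a unique one is synthesized from the local address
                 * (derivation scheme assumed). */
                *attached = own_addr + phy_id;
        } else {
                /* SAS IDENTIFY address frame: the attached SAS address
                 * is assumed at bytes 12..19, big endian. */
                uint64_t a = 0;
                int i;

                for (i = 0; i < 8; i++)
                        a = (a << 8) | frame[12 + i];
                *attached = a;
        }
}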
*/ -static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; @@ -3331,7 +3585,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; @@ -3347,8 +3601,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int -mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +/** + * fw_flash_update_resp - Response from FW for flash update command. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) { u32 status; struct fw_control_ex fw_control_context; @@ -3403,10 +3662,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) break; } ccb->fw_control_context->fw_control->retcode = status; - pci_free_consistent(pm8001_ha->pdev, - fw_control_context.len, - fw_control_context.virtAddr, - fw_control_context.phys_addr); complete(pm8001_ha->nvmd_completion); ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; @@ -3414,8 +3669,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int -mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) { u32 status; int i; @@ -3431,8 +3685,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) return 0; } -static int -mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct sas_task *t; struct pm8001_ccb_info *ccb; @@ -3440,19 +3693,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 status ; u32 tag, scp; struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; struct task_abort_resp *pPayload = (struct task_abort_resp *)(piomb + 4); status = le32_to_cpu(pPayload->status); tag = le32_to_cpu(pPayload->tag); + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TAG NULL. RETURNING !!!")); + return -1; + } + scp = le32_to_cpu(pPayload->scp); ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; - PM8001_IO_DBG(pm8001_ha, - pm8001_printk(" status = 0x%x\n", status)); - if (t == NULL) + pm8001_dev = ccb->device; /* retrieve device */ + + if (!t) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TASK NULL. 
RETURNING !!!")); return -1; + } ts = &t->task_status; if (status != 0) PM8001_FAIL_DBG(pm8001_ha, @@ -3476,7 +3739,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb(); - t->task_done(t); + + if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t) { + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + /* clear the flag */ + pm8001_dev->id &= 0xBFFFFFFF; + } else + t->task_done(t); + return 0; } @@ -3727,17 +3998,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_LOCAL_PHY_CNTRL: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); - mpi_local_phy_ctl(pm8001_ha, piomb); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); break; case OPC_OUB_DEV_REGIST: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_DEV_REGIST\n")); - mpi_reg_resp(pm8001_ha, piomb); + pm8001_mpi_reg_resp(pm8001_ha, piomb); break; case OPC_OUB_DEREG_DEV: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister the device\n")); - mpi_dereg_resp(pm8001_ha, piomb); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEV_HANDLE: PM8001_MSG_DBG(pm8001_ha, @@ -3775,7 +4046,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_FW_FLASH_UPDATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); - mpi_fw_flash_update_resp(pm8001_ha, piomb); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); break; case OPC_OUB_GPIO_RESPONSE: PM8001_MSG_DBG(pm8001_ha, @@ -3788,17 +4059,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_GENERAL_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); - mpi_general_event(pm8001_ha, piomb); + pm8001_mpi_general_event(pm8001_ha, piomb); break; case OPC_OUB_SSP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SATA_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SAS_DIAG_MODE_START_END: PM8001_MSG_DBG(pm8001_ha, @@ -3823,17 +4094,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SMP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); - mpi_get_nvmd_resp(pm8001_ha, piomb); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_SET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); - mpi_set_nvmd_resp(pm8001_ha, piomb); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_DEVICE_HANDLE_REMOVAL: PM8001_MSG_DBG(pm8001_ha, @@ -3842,7 +4113,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); - mpi_set_dev_state_resp(pm8001_ha, piomb); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, @@ -3864,7 +4135,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) } } -static int process_oq(struct 
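The NCQ recovery state is threaded through the high bits of pm8001_dev->id: the read-log completion sets the abort-all bit and clears the read-log bit with the 0x7FFFFFFF mask, and pm8001_mpi_task_abort_resp later clears the abort-all bit with 0xBFFFFFFF. The clear masks imply this bit assignment (values inferred from the masks above, not quoted from the driver header):

/* inferred from the clear masks used in the hunks above */
#define NCQ_READ_LOG_FLAG   0x80000000  /* cleared by id & 0x7FFFFFFF */
#define NCQ_ABORT_ALL_FLAG  0x40000000  /* cleared by id & 0xBFFFFFFF */
#define NCQ_2ND_RLE_FLAG    0x20000000  /* cleared by id & 0xDFFFFFFF */

Recovery thus runs NCQ error event, then READ LOG EXT, then abort-all, after which the flags are cleared and normal queued I/O can resume on the device.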
pm8001_hba_info *pm8001_ha) +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; void *pMsg1 = NULL; @@ -3873,14 +4144,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha) unsigned long flags; spin_lock_irqsave(&pm8001_ha->lock, flags); - circularQ = &pm8001_ha->outbnd_q_tbl[0]; + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; do { - ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); if (MPI_IO_STATUS_SUCCESS == ret) { /* process the outbound message */ process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); /* free the message from the outbound circular buffer */ - mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc); + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); } if (MPI_IO_STATUS_BUSY == ret) { /* Update the producer index from SPC */ @@ -3903,7 +4175,7 @@ static const u8 data_dir_flags[] = { [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ }; -static void +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) { int i; @@ -3978,7 +4250,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha, smp_cmd.long_smp_req.long_resp_size = cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); - mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); return 0; err_out_2: @@ -4042,7 +4314,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0); return ret; } @@ -4060,6 +4332,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 ATAP = 0x0; u32 dir; struct inbound_queue_table *circularQ; + unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -4080,8 +4353,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); } } - if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); ncg_tag = hdr_tag; + } dir = data_dir_flags[task->data_dir] << 8; sata_cmd.tag = cpu_to_le32(tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); @@ -4112,7 +4387,55 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); + + /* Check for read log for failed drive and return */ + if (sata_cmd.sata_fis.command == 0x2f) { + if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || + (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || + (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { + struct task_status_struct *ts; + + pm8001_ha_dev->id &= 0xDFFFFFFF; + ts = &task->task_status; + + spin_lock_irqsave(&task->task_state_lock, flags); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & + 
SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p resp 0x%x " + " stat 0x%x but aborted by upper layer " + "\n", task, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + } else if (task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } else if (!task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } + } + } + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); return ret; } @@ -4142,12 +4465,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id); - payload.sas_identify.dev_type = SAS_END_DEV; + payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; memcpy(payload.sas_identify.sas_addr, pm8001_ha->sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; - ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); return ret; } @@ -4157,7 +4480,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) * @num: the inbound queue number * @phy_id: the phy id which we wanted to start up. */ -static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_stop_req payload; @@ -4169,12 +4492,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); - ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); return ret; } /** - * see comments on mpi_reg_resp. + * see comments on pm8001_mpi_reg_resp. */ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 flag) @@ -4204,11 +4527,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, if (flag == 1) stp_sspsmp_sata = 0x02; /*direct attached sata */ else { - if (pm8001_dev->dev_type == SATA_DEV) + if (pm8001_dev->dev_type == SAS_SATA_DEV) stp_sspsmp_sata = 0x00; /* stp*/ - else if (pm8001_dev->dev_type == SAS_END_DEV || - pm8001_dev->dev_type == EDGE_DEV || - pm8001_dev->dev_type == FANOUT_DEV) + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) stp_sspsmp_sata = 0x01; /*ssp or smp*/ } if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) @@ -4228,14 +4551,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, SAS_ADDR_SIZE); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } /** - * see comments on mpi_reg_resp. 
+ * see comments on pm8001_mpi_reg_resp. */ -static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id) { struct dereg_dev_req payload; @@ -4249,7 +4572,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, payload.device_id = cpu_to_le32(device_id); PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister device device_id = %d\n", device_id)); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } @@ -4272,7 +4595,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(1); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } @@ -4296,11 +4619,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) * @stat: stat. */ static irqreturn_t -pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) { - pm8001_chip_interrupt_disable(pm8001_ha); - process_oq(pm8001_ha); - pm8001_chip_interrupt_enable(pm8001_ha); + pm8001_chip_interrupt_disable(pm8001_ha, vec); + process_oq(pm8001_ha, vec); + pm8001_chip_interrupt_enable(pm8001_ha, vec); return IRQ_HANDLED; } @@ -4322,7 +4645,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, task_abort.device_id = cpu_to_le32(dev_id); task_abort.tag = cpu_to_le32(cmd_tag); } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); return ret; } @@ -4331,16 +4654,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, * @task: the task we wanted to aborted. * @flag: the abort flag. */ -static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag) { u32 opc, device_id; int rc = TMF_RESP_FUNC_FAILED; - PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag" - " = %x", cmd_tag, task_tag)); - if (pm8001_dev->dev_type == SAS_END_DEV) + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("cmd_tag = %x, abort task tag = 0x%x", + cmd_tag, task_tag)); + if (pm8001_dev->dev_type == SAS_END_DEVICE) opc = OPC_INB_SSP_ABORT; - else if (pm8001_dev->dev_type == SATA_DEV) + else if (pm8001_dev->dev_type == SAS_SATA_DEV) opc = OPC_INB_SATA_ABORT; else opc = OPC_INB_SMP_ABORT;/* SMP */ @@ -4358,7 +4682,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, * @ccb: the ccb information. * @tmf: task management function. 
*/ -static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) { struct sas_task *task = ccb->task; @@ -4376,11 +4700,11 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); circularQ = &pm8001_ha->inbnd_q_tbl[0]; - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0); return ret; } -static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_GET_NVMD_DATA; @@ -4397,7 +4721,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; - fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; + fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific; fw_control_context->len = ioctl_payload->length; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&nvmd_req, 0, sizeof(nvmd_req)); @@ -4456,11 +4780,11 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); return rc; } -static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_SET_NVMD_DATA; @@ -4479,7 +4803,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, return -ENOMEM; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, - ioctl_payload->func_specific, + &ioctl_payload->func_specific, ioctl_payload->length); memset(&nvmd_req, 0, sizeof(nvmd_req)); rc = pm8001_tag_alloc(pm8001_ha, &tag); @@ -4536,7 +4860,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); return rc; } @@ -4545,7 +4869,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, * @pm8001_ha: our hba card information. 
* @fw_flash_updata_info: firmware flash update param */ -static int +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, void *fw_flash_updata_info, u32 tag) { @@ -4567,11 +4891,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); payload.sgl_addr_hi = cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } -static int +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, void *payload) { @@ -4581,29 +4905,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, int rc; u32 tag; struct pm8001_ccb_info *ccb; - void *buffer = NULL; - dma_addr_t phys_addr; - u32 phys_addr_hi; - u32 phys_addr_lo; + void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr; + dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr; struct pm8001_ioctl_payload *ioctl_payload = payload; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; - fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; - if (fw_control->len != 0) { - if (pm8001_mem_alloc(pm8001_ha->pdev, - (void **)&buffer, - &phys_addr, - &phys_addr_hi, - &phys_addr_lo, - fw_control->len, 0) != 0) { - PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("Mem alloc failure\n")); - kfree(fw_control_context); - return -ENOMEM; - } - } + fw_control = (struct fw_control_info *)&ioctl_payload->func_specific; memcpy(buffer, fw_control->buffer, fw_control->len); flash_update_info.sgl.addr = cpu_to_le64(phys_addr); flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); @@ -4613,6 +4922,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, flash_update_info.total_image_len = fw_control->size; fw_control_context->fw_control = fw_control; fw_control_context->virtAddr = buffer; + fw_control_context->phys_addr = phys_addr; fw_control_context->len = fw_control->len; rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) { @@ -4627,7 +4937,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, return rc; } -static int +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 state) { @@ -4648,7 +4958,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.device_id = cpu_to_le32(pm8001_dev->device_id); payload.nds = cpu_to_le32(state); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } @@ -4673,7 +4983,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) payload.SSAHOLT = cpu_to_le32(0xd << 25); payload.sata_hol_tmo = cpu_to_le32(80); payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } @@ -4706,4 +5016,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = { .set_dev_state_req = pm8001_chip_set_dev_state_req, .sas_re_init_req = pm8001_chip_sas_re_initialization, }; - diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h index d437309cb1e1..d7c1e2034226 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.h +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -131,6 +131,8 @@ #define LINKRATE_30 
(0x02 << 8) #define LINKRATE_60 (0x04 << 8) +/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */ +#define GSM_SM_BASE 0x4F0000 struct mpi_msg_hdr{ __le32 header; /* Bits [11:0] - Message operation code */ /* Bits [15:12] - Message Category */ @@ -298,7 +300,7 @@ struct local_phy_ctl_resp { #define OP_BITS 0x0000FF00 -#define ID_BITS 0x0000000F +#define ID_BITS 0x000000FF /* * brief the data structure of PORT Control Command diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 3d5e522e00fc..e4b9bc7f5410 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -44,8 +44,16 @@ static struct scsi_transport_template *pm8001_stt; +/** + * chip info structure to identify chip key functionality as + * encryption available/not, no of ports, hw specific function ref + */ static const struct pm8001_chip_info pm8001_chips[] = { - [chip_8001] = { 8, &pm8001_8001_dispatch,}, + [chip_8001] = {0, 8, &pm8001_8001_dispatch,}, + [chip_8008] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8009] = {1, 8, &pm8001_80xx_dispatch,}, + [chip_8018] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8019] = {1, 16, &pm8001_80xx_dispatch,}, }; static int pm8001_id; @@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) } #ifdef PM8001_USE_TASKLET + +/** + * tasklet for 64 msi-x interrupt handler + * @opaque: the passed general host adapter struct + * Note: pm8001_tasklet is common for pm8001 & pm80xx + */ static void pm8001_tasklet(unsigned long opaque) { struct pm8001_hba_info *pm8001_ha; + u32 vec; pm8001_ha = (struct pm8001_hba_info *)opaque; if (unlikely(!pm8001_ha)) BUG_ON(1); - PM8001_CHIP_DISP->isr(pm8001_ha); + vec = pm8001_ha->int_vector; + PM8001_CHIP_DISP->isr(pm8001_ha, vec); +} +#endif + +static struct pm8001_hba_info *outq_to_hba(u8 *outq) +{ + return container_of((outq - *outq), struct pm8001_hba_info, outq[0]); } + +/** + * pm8001_interrupt_handler_msix - main MSIX interrupt handler. + * It obtains the vector number and calls the equivalent bottom + * half or services directly. + * @opaque: the passed outbound queue/vector. Host structure is + * retrieved from the same. + */ +static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) +{ + struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque); + u8 outq = *(u8 *)opaque; + irqreturn_t ret = IRQ_HANDLED; + if (unlikely(!pm8001_ha)) + return IRQ_NONE; + if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) + return IRQ_NONE; + pm8001_ha->int_vector = outq; +#ifdef PM8001_USE_TASKLET + tasklet_schedule(&pm8001_ha->tasklet); +#else + ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq); #endif + return ret; +} +/** + * pm8001_interrupt_handler_intx - main INTx interrupt handler. + * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure. + */ - /** - * pm8001_interrupt - when HBA originate a interrupt,we should invoke this - * dispatcher to handle each case. - * @irq: irq number. 
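outq_to_hba() recovers the HBA structure from nothing but the interrupt cookie: each MSI-X vector registers &pm8001_ha->outq[i], and because outq[i] stores i, subtracting the stored value from the cookie always lands on outq[0], whose enclosing structure container_of() then returns. The same trick in standalone form, using an illustrative struct in place of pm8001_hba_info:

#include <stddef.h>
#include <stdint.h>

struct hba_sketch {
        int other_state;
        uint8_t outq[64];       /* outq[i] == i, one slot per vector */
};

static struct hba_sketch *cookie_to_hba(uint8_t *cookie)
{
        uint8_t *base = cookie - *cookie;       /* back to &outq[0] */

        return (struct hba_sketch *)((char *)base -
                        offsetof(struct hba_sketch, outq[0]));
}

This lets one handler serve all vectors: the cookie identifies both the HBA and the outbound queue number (*cookie) to process.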
- * @opaque: the passed general host adapter struct - */ -static irqreturn_t pm8001_interrupt(int irq, void *opaque) +static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) { struct pm8001_hba_info *pm8001_ha; irqreturn_t ret = IRQ_HANDLED; - struct sas_ha_struct *sha = opaque; + struct sas_ha_struct *sha = dev_id; pm8001_ha = sha->lldd_ha; if (unlikely(!pm8001_ha)) return IRQ_NONE; if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) return IRQ_NONE; + + pm8001_ha->int_vector = 0; #ifdef PM8001_USE_TASKLET tasklet_schedule(&pm8001_ha->tasklet); #else - ret = PM8001_CHIP_DISP->isr(pm8001_ha); + ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0); #endif return ret; } @@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque) * @pm8001_ha:our hba structure. * */ -static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) +static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, + const struct pci_device_id *ent) { int i; spin_lock_init(&pm8001_ha->lock); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("pm8001_alloc: PHY:%x\n", + pm8001_ha->chip->n_phy)); for (i = 0; i < pm8001_ha->chip->n_phy; i++) { pm8001_phy_init(pm8001_ha, i); pm8001_ha->port[i].wide_port_phymap = 0; @@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; pm8001_ha->memoryMap.region[IOP].alignment = 32; - /* MPI Memory region 3 for consumer Index of inbound queues */ - pm8001_ha->memoryMap.region[CI].num_elements = 1; - pm8001_ha->memoryMap.region[CI].element_size = 4; - pm8001_ha->memoryMap.region[CI].total_len = 4; - pm8001_ha->memoryMap.region[CI].alignment = 4; - - /* MPI Memory region 4 for producer Index of outbound queues */ - pm8001_ha->memoryMap.region[PI].num_elements = 1; - pm8001_ha->memoryMap.region[PI].element_size = 4; - pm8001_ha->memoryMap.region[PI].total_len = 4; - pm8001_ha->memoryMap.region[PI].alignment = 4; - - /* MPI Memory region 5 inbound queues */ - pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[IB].element_size = 64; - pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[IB].alignment = 64; - - /* MPI Memory region 6 outbound queues */ - pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[OB].element_size = 64; - pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[OB].alignment = 64; + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + /* MPI Memory region 3 for consumer Index of inbound queues */ + pm8001_ha->memoryMap.region[CI+i].num_elements = 1; + pm8001_ha->memoryMap.region[CI+i].element_size = 4; + pm8001_ha->memoryMap.region[CI+i].total_len = 4; + pm8001_ha->memoryMap.region[CI+i].alignment = 4; + + if ((ent->driver_data) != chip_8001) { + /* MPI Memory region 5 inbound queues */ + pm8001_ha->memoryMap.region[IB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[IB+i].element_size = 128; + pm8001_ha->memoryMap.region[IB+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[IB+i].alignment = 128; + } else { + pm8001_ha->memoryMap.region[IB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[IB+i].element_size = 64; + pm8001_ha->memoryMap.region[IB+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[IB+i].alignment = 64; + } + } + + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + /* MPI Memory region 4 for producer Index of 
outbound queues */ + pm8001_ha->memoryMap.region[PI+i].num_elements = 1; + pm8001_ha->memoryMap.region[PI+i].element_size = 4; + pm8001_ha->memoryMap.region[PI+i].total_len = 4; + pm8001_ha->memoryMap.region[PI+i].alignment = 4; + + if (ent->driver_data != chip_8001) { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[OB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[OB+i].element_size = 128; + pm8001_ha->memoryMap.region[OB+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[OB+i].alignment = 128; + } else { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[OB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[OB+i].element_size = 64; + pm8001_ha->memoryMap.region[OB+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[OB+i].alignment = 64; + } + } /* Memory region write DMA*/ pm8001_ha->memoryMap.region[NVMD].num_elements = 1; pm8001_ha->memoryMap.region[NVMD].element_size = 4096; @@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB * sizeof(struct pm8001_ccb_info); + /* Memory region for fw flash */ + pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; + for (i = 0; i < USI_MAX_MEMCNT; i++) { if (pm8001_mem_alloc(pm8001_ha->pdev, &pm8001_ha->memoryMap.region[i].virt_ptr, @@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr; for (i = 0; i < PM8001_MAX_DEVICES; i++) { - pm8001_ha->devices[i].dev_type = NO_DEVICE; + pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; pm8001_ha->devices[i].id = i; pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; pm8001_ha->devices[i].running_req = 0; @@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) ioremap(pm8001_ha->io_mem[logicalBar].membase, pm8001_ha->io_mem[logicalBar].memsize); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("PCI: bar %d, logicalBar %d " - "virt_addr=%lx,len=%d\n", bar, logicalBar, - (unsigned long) - pm8001_ha->io_mem[logicalBar].memvirtaddr, + pm8001_printk("PCI: bar %d, logicalBar %d ", + bar, logicalBar)); + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "base addr %llx virt_addr=%llx len=%d\n", + (u64)pm8001_ha->io_mem[logicalBar].membase, + (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr, pm8001_ha->io_mem[logicalBar].memsize)); } else { pm8001_ha->io_mem[logicalBar].membase = 0; @@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) * @shost: scsi host struct which has been initialized before. 
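The region sizing above encodes the one real layout difference between the generations: a legacy SPC IOMB is 64 bytes while SPCv/SPCve IOMBs are 128 bytes, with PM8001_MPI_QUEUE elements per inbound or outbound queue either way. A sketch of the length computation; the element count here is illustrative, the real constant lives in pm8001_defs.h:

#include <stddef.h>

#define PM8001_MPI_QUEUE_SKETCH 256     /* illustrative element count */

static size_t queue_region_len(int is_spcv)
{
        size_t iomb = is_spcv ? 128 : 64;       /* SPCv vs legacy SPC IOMB */

        return PM8001_MPI_QUEUE_SKETCH * iomb;
}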
*/ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, - u32 chip_id, - struct Scsi_Host *shost) + const struct pci_device_id *ent, + struct Scsi_Host *shost) + { struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); @@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->pdev = pdev; pm8001_ha->dev = &pdev->dev; - pm8001_ha->chip_id = chip_id; + pm8001_ha->chip_id = ent->driver_data; pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; pm8001_ha->irq = pdev->irq; pm8001_ha->sas = sha; @@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->id = pm8001_id++; pm8001_ha->logging_level = 0x01; sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); + /* IOMB size is 128 for 8088/89 controllers */ + if (pm8001_ha->chip_id != chip_8001) + pm8001_ha->iomb_size = IOMB_SIZE_SPCV; + else + pm8001_ha->iomb_size = IOMB_SIZE_SPC; + #ifdef PM8001_USE_TASKLET + /** + * default tasklet for non msi-x interrupt handler/first msi-x + * interrupt handler + **/ tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); + (unsigned long)pm8001_ha); #endif pm8001_ioremap(pm8001_ha); - if (!pm8001_alloc(pm8001_ha)) + if (!pm8001_alloc(pm8001_ha, ent)) return pm8001_ha; pm8001_free(pm8001_ha); return NULL; @@ -512,21 +605,50 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, */ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) { - u8 i; + u8 i, j; #ifdef PM8001_READ_VPD + /* For new SPC controllers WWN is stored in flash vpd + * For SPC/SPCve controllers WWN is stored in EEPROM + * For Older SPC WWN is stored in NVMD + */ DECLARE_COMPLETION_ONSTACK(completion); struct pm8001_ioctl_payload payload; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); pm8001_ha->nvmd_completion = &completion; - payload.minor_function = 0; - payload.length = 128; - payload.func_specific = kzalloc(128, GFP_KERNEL); + + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) { + payload.minor_function = 4; + payload.length = 4096; + } else { + payload.minor_function = 0; + payload.length = 128; + } + } else { + payload.minor_function = 1; + payload.length = 4096; + } + payload.offset = 0; + payload.func_specific = kzalloc(payload.length, GFP_KERNEL); PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); wait_for_completion(&completion); + + for (i = 0, j = 0; i <= 7; i++, j++) { + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) + pm8001_ha->sas_addr[j] = + payload.func_specific[0x704 + i]; + } else + pm8001_ha->sas_addr[j] = + payload.func_specific[0x804 + i]; + } + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { - memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, - SAS_ADDR_SIZE); + memcpy(&pm8001_ha->phy[i].dev_sas_addr, + pm8001_ha->sas_addr, SAS_ADDR_SIZE); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("phy %d sas_addr = %016llx \n", i, + pm8001_printk("phy %d sas_addr = %016llx\n", i, pm8001_ha->phy[i].dev_sas_addr)); } #else @@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) * @chip_info: our ha struct. 
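Once the NVMD read completes, the WWN is copied byte-wise out of the returned buffer at a chip-dependent offset: 0x704 for the 8081 flash layout and 0x804 for the SPCv VPD image, as the loop above shows. A minimal sketch of that pick-up:

#include <stdint.h>

static void copy_wwn(uint8_t dst[8], const uint8_t *vpd, int is_8081)
{
        const int off = is_8081 ? 0x704 : 0x804; /* offsets from the hunk */
        int i;

        for (i = 0; i < 8; i++)
                dst[i] = vpd[off + i];
}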
* @irq_handler: irq_handler */ -static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, - irq_handler_t irq_handler) +static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) { u32 i = 0, j = 0; - u32 number_of_intr = 1; + u32 number_of_intr; int flag = 0; u32 max_entry; int rc; + static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3]; + + /* SPCv controllers supports 64 msi-x */ + if (pm8001_ha->chip_id == chip_8001) { + number_of_intr = 1; + flag |= IRQF_DISABLED; + } else { + number_of_intr = PM8001_MAX_MSIX_VEC; + flag &= ~IRQF_SHARED; + flag |= IRQF_DISABLED; + } + max_entry = sizeof(pm8001_ha->msix_entries) / sizeof(pm8001_ha->msix_entries[0]); - flag |= IRQF_DISABLED; for (i = 0; i < max_entry ; i++) pm8001_ha->msix_entries[i].entry = i; rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, number_of_intr); pm8001_ha->number_of_intr = number_of_intr; if (!rc) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "pci_enable_msix request ret:%d no of intr %d\n", + rc, pm8001_ha->number_of_intr)); + + for (i = 0; i < number_of_intr; i++) + pm8001_ha->outq[i] = i; + for (i = 0; i < number_of_intr; i++) { + snprintf(intr_drvname[i], sizeof(intr_drvname[0]), + DRV_NAME"%d", i); if (request_irq(pm8001_ha->msix_entries[i].vector, - irq_handler, flag, DRV_NAME, - SHOST_TO_SAS_HA(pm8001_ha->shost))) { + pm8001_interrupt_handler_msix, flag, + intr_drvname[i], &pm8001_ha->outq[i])) { for (j = 0; j < i; j++) free_irq( pm8001_ha->msix_entries[j].vector, - SHOST_TO_SAS_HA(pm8001_ha->shost)); + &pm8001_ha->outq[j]); pci_disable_msix(pm8001_ha->pdev); break; } @@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) { struct pci_dev *pdev; - irq_handler_t irq_handler = pm8001_interrupt; int rc; pdev = pm8001_ha->pdev; #ifdef PM8001_USE_MSIX if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) - return pm8001_setup_msix(pm8001_ha, irq_handler); - else + return pm8001_setup_msix(pm8001_ha); + else { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MSIX not supported!!!\n")); goto intx; + } #endif intx: /* initialize the INT-X interrupt */ - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, - SHOST_TO_SAS_HA(pm8001_ha->shost)); + rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED, + DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost)); return rc; } @@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev, { unsigned int rc; u32 pci_reg; + u8 i = 0; struct pm8001_hba_info *pm8001_ha; struct Scsi_Host *shost = NULL; const struct pm8001_chip_info *chip; dev_printk(KERN_INFO, &pdev->dev, - "pm8001: driver version %s\n", DRV_VERSION); + "pm80xx: driver version %s\n", DRV_VERSION); rc = pci_enable_device(pdev); if (rc) goto err_out_enable; @@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev, goto err_out_free; } pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); - pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost); + /* ent->driver variable is used to differentiate between controllers */ + pm8001_ha = pm8001_pci_alloc(pdev, ent, shost); if (!pm8001_ha) { rc = -ENOMEM; goto err_out_free; } list_add_tail(&pm8001_ha->list, &hba_list); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); - if (rc) + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "chip_init failed [ret: %d]\n", rc)); goto err_out_ha_free; + } rc = scsi_add_host(shost, 
&pdev->dev); if (rc) goto err_out_ha_free; rc = pm8001_request_irq(pm8001_ha); - if (rc) + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "pm8001_request_irq failed [ret: %d]\n", rc)); goto err_out_shost; + } + + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + /* setup thermal configuration. */ + pm80xx_set_thermal_config(pm8001_ha); + } - PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); pm8001_init_sas_add(pm8001_ha); pm8001_post_sas_ha_init(shost, chip); rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); @@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev) sas_remove_host(pm8001_ha->shost); list_del(&pm8001_ha->list); scsi_remove_host(pm8001_ha->shost); - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, sha); + free_irq(pm8001_ha->msix_entries[i].vector, + &pm8001_ha->outq[i]); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); @@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) printk(KERN_ERR " PCI PM not supported\n"); return -ENODEV; } - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, sha); + free_irq(pm8001_ha->msix_entries[i].vector, + &pm8001_ha->outq[i]); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); @@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; int rc; + u8 i = 0; u32 device_state; pm8001_ha = sha->lldd_ha; device_state = pdev->current_state; @@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev) if (rc) goto err_out_disable; - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + /* chip soft rst only for spc */ + if (pm8001_ha->chip_id == chip_8001) { + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip soft reset successful\n")); + } rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); if (rc) goto err_out_disable; - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); + + /* disable all the interrupt bits */ + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + rc = pm8001_request_irq(pm8001_ha); if (rc) goto err_out_disable; - #ifdef PM8001_USE_TASKLET +#ifdef PM8001_USE_TASKLET + /* default tasklet for non msi-x interrupt handler/first msi-x + * interrupt handler */ tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); - #endif - PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); + (unsigned long)pm8001_ha); +#endif + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + } 
scsi_unblock_requests(pm8001_ha->shost); return 0; @@ -843,14 +1018,45 @@ err_out_enable: return rc; } +/* update of pci device, vendor id and driver data with + * unique value for each of the controller + */ static struct pci_device_id pm8001_pci_table[] = { - { - PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 - }, + { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, { PCI_DEVICE(0x117c, 0x0042), .driver_data = chip_8001 }, + /* Support for SPC/SPCv/SPCve controllers */ + { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 }, + { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 }, + { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 }, + { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 }, + { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 }, + { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 }, + { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 }, + { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 }, + { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, {} /* terminate list */ }; @@ -870,7 +1076,7 @@ static int __init pm8001_init(void) { int rc = -ENOMEM; - pm8001_wq = alloc_workqueue("pm8001", 0, 0); + pm8001_wq = alloc_workqueue("pm80xx", 0, 0); if (!pm8001_wq) goto err; @@ -902,7 +1108,8 @@ module_init(pm8001_init); module_exit(pm8001_exit); MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); -MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver"); +MODULE_DESCRIPTION( + "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pm8001_pci_table); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index b961112395d5..a85d73de7c80 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag) clear_bit(tag, bitmap); } -static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) { pm8001_tag_clear(pm8001_ha, tag); } @@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_GET_EVENTS: spin_lock_irqsave(&pm8001_ha->lock, flags); - if (-1 == pm8001_bar4_shift(pm8001_ha, + if (pm8001_ha->chip_id == chip_8001) { + if (-1 == pm8001_bar4_shift(pm8001_ha, (phy_id < 4) ? 
0x30000 : 0x40000)) { - spin_unlock_irqrestore(&pm8001_ha->lock, flags); - return -EINVAL; + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -EINVAL; + } } { struct sas_phy *phy = sas_phy->phy; @@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, phy->loss_of_dword_sync_count = qp[3]; phy->phy_reset_problem_count = qp[4]; } - pm8001_bar4_shift(pm8001_ha, 0); + if (pm8001_ha->chip_id == chip_8001) + pm8001_bar4_shift(pm8001_ha, 0); spin_unlock_irqrestore(&pm8001_ha->lock, flags); return 0; default: @@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost) struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); pm8001_ha = sha->lldd_ha; - PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); + /* SAS_RE_INITIALIZATION not available in SPCv/ve */ + if (pm8001_ha->chip_id == chip_8001) + PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); for (i = 0; i < pm8001_ha->chip->n_phy; ++i) PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); } @@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev) * @tmf: the task management IU */ #define DEV_IS_GONE(pm8001_dev) \ - ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) + ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) static int pm8001_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) { @@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num, struct task_status_struct *tsm = &t->task_status; tsm->resp = SAS_TASK_UNDELIVERED; tsm->stat = SAS_PHY_DOWN; - if (dev->dev_type != SATA_DEV) + if (dev->dev_type != SAS_SATA_DEV) t->task_done(t); return 0; } @@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { - if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { + if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) { pm8001_ha->devices[dev].id = dev; return &pm8001_ha->devices[dev]; } @@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) } return NULL; } +/** + * pm8001_find_dev - find a matching pm8001_device + * @pm8001_ha: our hba card information + */ +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id) +{ + u32 dev; + for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { + if (pm8001_ha->devices[dev].device_id == device_id) + return &pm8001_ha->devices[dev]; + } + if (dev == PM8001_MAX_DEVICES) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING " + "DEVICE FOUND !!!\n")); + } + return NULL; +} static void pm8001_free_dev(struct pm8001_device *pm8001_dev) { u32 id = pm8001_dev->id; memset(pm8001_dev, 0, sizeof(*pm8001_dev)); pm8001_dev->id = id; - pm8001_dev->dev_type = NO_DEVICE; + pm8001_dev->dev_type = SAS_PHY_UNUSED; pm8001_dev->device_id = PM8001_MAX_DEVICES; pm8001_dev->sas_device = NULL; } @@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) res = -1; } } else { - if (dev->dev_type == SATA_DEV) { + if (dev->dev_type == SAS_SATA_DEV) { pm8001_device->attached_phy = dev->rphy->identify.phy_identifier; flag = 1; /* directly sata*/ @@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); spin_unlock_irqrestore(&pm8001_ha->lock, flags); wait_for_completion(&completion); - if (dev->dev_type == SAS_END_DEV) + if (dev->dev_type == SAS_END_DEVICE) msleep(50); 
pm8001_ha->flags = PM8001F_RUN_TIME; return 0; @@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev) return pm8001_dev_found_notify(dev); } -static void pm8001_task_done(struct sas_task *task) +void pm8001_task_done(struct sas_task *task) { if (!del_timer(&task->slow_task->timer)) return; @@ -904,7 +927,7 @@ void pm8001_open_reject_retry( struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i]; pm8001_dev = ccb->device; - if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)) + if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)) continue; if (!device_to_close) { uintptr_t d = (uintptr_t)pm8001_dev @@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev) return rc; } +/* +* This function handle the IT_NEXUS_XXX event or completion +* status code for SSP/SATA/SMP I/O request. +*/ +int pm8001_I_T_nexus_event_handler(struct domain_device *dev) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev; + struct pm8001_hba_info *pm8001_ha; + struct sas_phy *phy; + u32 device_id = 0; + + if (!dev || !dev->lldd_dev) + return -1; + + pm8001_dev = dev->lldd_dev; + device_id = pm8001_dev->device_id; + pm8001_ha = pm8001_find_ha_by_dev(dev); + + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("I_T_Nexus handler invoked !!")); + + phy = sas_get_local_phy(dev); + + if (dev_is_sata(dev)) { + DECLARE_COMPLETION_ONSTACK(completion_setstate); + if (scsi_is_sas_phy_local(phy)) { + rc = 0; + goto out; + } + /* send internal ssp/sata/smp abort command to FW */ + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + pm8001_dev->setds_completion = &completion_setstate; + + wait_for_completion(&completion_setstate); + } else { + /* send internal ssp/sata/smp abort command to FW */ + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + } + PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc)); +out: + sas_put_local_phy(phy); + + return rc; +} /* mandatory SAM-3, the task reset the specified LUN*/ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) { diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 11008205aeb3..570819464d90 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -57,8 +57,8 @@ #include <linux/atomic.h> #include "pm8001_defs.h" -#define DRV_NAME "pm8001" -#define DRV_VERSION "0.1.36" +#define DRV_NAME "pm80xx" +#define DRV_VERSION "0.1.37" #define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ #define PM8001_INIT_LOGGING 0x02 /* driver init logging */ #define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ @@ -66,8 +66,8 @@ #define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/ #define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ #define PM8001_MSG_LOGGING 0x40 /* misc message logging */ -#define pm8001_printk(format, arg...) 
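The SATA branch of pm8001_I_T_nexus_event_handler above parks an on-stack completion in pm8001_dev->setds_completion and sleeps until the firmware's SET_DEVICE_STATE response wakes it. A minimal sketch of that handshake, assuming the response side runs from the outbound-queue handler (as the pm8001_mpi_set_dev_state_resp prototype later in this patch suggests):

/* Sketch of the on-stack completion handshake used above: the waiter
 * publishes the completion and blocks; the response handler, running
 * in interrupt/tasklet context, calls complete() on it. */
void example_wait_for_set_dev_state(struct pm8001_device *pm8001_dev)
{
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	pm8001_dev->setds_completion = &completion_setstate;
	/* ... queue the SET_DEVICE_STATE request to the FW here ... */
	wait_for_completion(&completion_setstate);
}

void example_set_dev_state_resp(struct pm8001_device *pm8001_dev)
{
	complete(pm8001_dev->setds_completion);	/* wakes the waiter */
}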
printk(KERN_INFO "%s %d:" format,\ - __func__, __LINE__, ## arg) +#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \ + format, __func__, __LINE__, ## arg) #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \ do { \ if (unlikely(HBA->logging_level & LEVEL)) \ @@ -103,11 +103,12 @@ do { \ #define PM8001_READ_VPD -#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) +#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) #define PM8001_NAME_LENGTH 32/* generic length of strings */ extern struct list_head hba_list; extern const struct pm8001_dispatch pm8001_8001_dispatch; +extern const struct pm8001_dispatch pm8001_80xx_dispatch; struct pm8001_hba_info; struct pm8001_ccb_info; @@ -131,15 +132,15 @@ struct pm8001_ioctl_payload { struct pm8001_dispatch { char *name; int (*chip_init)(struct pm8001_hba_info *pm8001_ha); - int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature); + int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha); void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); - irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha); + irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec); u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha); - int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha); - void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha); - void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha); + int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec); void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); int (*smp_req)(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb); @@ -173,6 +174,7 @@ struct pm8001_dispatch { }; struct pm8001_chip_info { + u32 encrypt; u32 n_phy; const struct pm8001_dispatch *dispatch; }; @@ -204,7 +206,7 @@ struct pm8001_phy { }; struct pm8001_device { - enum sas_dev_type dev_type; + enum sas_device_type dev_type; struct domain_device *sas_device; u32 attached_phy; u32 id; @@ -256,7 +258,20 @@ struct mpi_mem_req { struct mpi_mem region[USI_MAX_MEMCNT]; }; -struct main_cfg_table { +struct encrypt { + u32 cipher_mode; + u32 sec_mode; + u32 status; + u32 flag; +}; + +struct sas_phy_attribute_table { + u32 phystart1_16[16]; + u32 outbound_hw_event_pid1_16[16]; +}; + +union main_cfg_table { + struct { u32 signature; u32 interface_rev; u32 firmware_rev; @@ -292,19 +307,69 @@ struct main_cfg_table { u32 fatal_err_dump_length1; u32 hda_mode_flag; u32 anolog_setup_table_offset; + u32 rsvd[4]; + } pm8001_tbl; + + struct { + u32 signature; + u32 interface_rev; + u32 firmware_rev; + u32 max_out_io; + u32 max_sgl; + u32 ctrl_cap_flag; + u32 gst_offset; + u32 inbound_queue_offset; + u32 outbound_queue_offset; + u32 inbound_q_nppd_hppd; + u32 rsvd[8]; + u32 crc_core_dump; + u32 rsvd1; + u32 upper_event_log_addr; + u32 lower_event_log_addr; + u32 event_log_size; + u32 event_log_severity; + u32 upper_pcs_event_log_addr; + u32 lower_pcs_event_log_addr; + u32 pcs_event_log_size; + u32 pcs_event_log_severity; + u32 fatal_err_interrupt; + u32 fatal_err_dump_offset0; + u32 fatal_err_dump_length0; + u32 fatal_err_dump_offset1; + u32 fatal_err_dump_length1; + u32 gpio_led_mapping; + u32 analog_setup_table_offset; + u32 int_vec_table_offset; + u32 
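The reworked pm8001_printk macro prepends a fixed "pm80xx" tag so every message is greppable by driver name. For illustration, how one call expands under the new definition:

/* Expansion example for the reworked macro (illustrative):
 *
 *   pm8001_printk("phy %d up\n", phy_id);
 *
 * becomes
 *
 *   printk(KERN_INFO "pm80xx %s %d:" "phy %d up\n",
 *          __func__, __LINE__, phy_id);
 *
 * i.e. a log line of the form "pm80xx <function> <line>:phy 3 up". */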
phy_attr_table_offset; + u32 port_recovery_timer; + u32 interrupt_reassertion_delay; + } pm80xx_tbl; }; -struct general_status_table { + +union general_status_table { + struct { u32 gst_len_mpistate; u32 iq_freeze_state0; u32 iq_freeze_state1; u32 msgu_tcnt; u32 iop_tcnt; - u32 reserved; + u32 rsvd; u32 phy_state[8]; - u32 reserved1; - u32 reserved2; - u32 reserved3; + u32 gpio_input_val; + u32 rsvd1[2]; + u32 recover_err_info[8]; + } pm8001_tbl; + struct { + u32 gst_len_mpistate; + u32 iq_freeze_state0; + u32 iq_freeze_state1; + u32 msgu_tcnt; + u32 iop_tcnt; + u32 rsvd[9]; + u32 gpio_input_val; + u32 rsvd1[2]; u32 recover_err_info[8]; + } pm80xx_tbl; }; struct inbound_queue_table { u32 element_pri_size_cnt; @@ -351,15 +416,21 @@ struct pm8001_hba_info { struct device *dev; struct pm8001_hba_memspace io_mem[6]; struct mpi_mem_req memoryMap; + struct encrypt encrypt_info; /* support encryption */ void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/ void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/ - struct main_cfg_table main_cfg_tbl; - struct general_status_table gs_tbl; - struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM]; - struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; + void __iomem *pspa_q_tbl_addr; + /*MPI SAS PHY attributes Queue Config Table Addr*/ + void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */ + union main_cfg_table main_cfg_tbl; + union general_status_table gs_tbl; + struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM]; + struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM]; + struct sas_phy_attribute_table phy_attr_table; + /* MPI SAS PHY attributes */ u8 sas_addr[SAS_ADDR_SIZE]; struct sas_ha_struct *sas;/* SCSI/SAS glue */ struct Scsi_Host *shost; @@ -372,10 +443,12 @@ struct pm8001_hba_info { struct pm8001_port port[PM8001_MAX_PHYS]; u32 id; u32 irq; + u32 iomb_size; /* SPC and SPCV IOMB size */ struct pm8001_device *devices; struct pm8001_ccb_info *ccb_info; #ifdef PM8001_USE_MSIX - struct msix_entry msix_entries[16];/*for msi-x interrupt*/ + struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC]; + /*for msi-x interrupt*/ int number_of_intr;/*will be used in remove()*/ #endif #ifdef PM8001_USE_TASKLET @@ -383,7 +456,10 @@ struct pm8001_hba_info { #endif u32 logging_level; u32 fw_status; + u32 smp_exp_mode; + u32 int_vector; const struct firmware *fw_image; + u8 outq[PM8001_MAX_MSIX_VEC]; }; struct pm8001_work { @@ -419,6 +495,9 @@ struct pm8001_fw_image_header { #define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 #define FLASH_UPDATE_DISABLED 0x11 +#define NCQ_READ_LOG_FLAG 0x80000000 +#define NCQ_ABORT_ALL_FLAG 0x40000000 +#define NCQ_2ND_RLE_FLAG 0x20000000 /** * brief param structure for firmware flash update. 
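Because the SPC and SPCv/ve firmware lay out the MPI main configuration and general status tables differently, the driver overlays both views in one union and selects the member per chip family. A hedged sketch of the access pattern; the chip_id test is an assumed discriminator, consistent with the chip_id guards used elsewhere in this patch:

/* Same storage, two views: which union member is valid is decided by
 * chip family (sketch only). */
static u32 example_fw_rev(struct pm8001_hba_info *pm8001_ha)
{
	if (pm8001_ha->chip_id == chip_8001)
		return pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev;
	return pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev;
}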
*/ @@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev); void pm8001_dev_gone(struct domain_device *dev); int pm8001_lu_reset(struct domain_device *dev, u8 *lun); int pm8001_I_T_nexus_reset(struct domain_device *dev); +int pm8001_I_T_nexus_event_handler(struct domain_device *dev); int pm8001_query_task(struct sas_task *task); void pm8001_open_reject_retry( struct pm8001_hba_info *pm8001_ha, @@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, u32 mem_size, u32 align); +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha); +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, + struct inbound_queue_table *circularQ, + u32 opCode, void *payload, u32 responseQueue); +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, + u16 messageSize, void **messagePtr); +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, + struct outbound_queue_table *circularQ, u8 bc); +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, + void **messagePtr1, u8 *pBC); +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state); +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, + void *payload); +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, + void *fw_flash_updata_info, u32 tag); +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, + struct pm8001_tmf_task *tmf); +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, + u8 flag, u32 task_tag, u32 cmd_tag); +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id); +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd); +void pm8001_work_fn(struct work_struct *work); +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, + void *data, int handler); +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate); +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr); +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i); +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb); +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +struct sas_task *pm8001_alloc_task(void); +void pm8001_task_done(struct sas_task *task); +void pm8001_free_task(struct sas_task *task); +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag); +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id); +int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha); + int pm8001_bar4_shift(struct pm8001_hba_info 
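The three NCQ_* constants defined above are single high bits that the SATA error-recovery path ORs into pm8001_device->id as side-band state (pm80xx_send_read_log later in this patch sets two of them). A small sketch of the set/test/clear idiom, assuming only what those definitions imply:

/* The flags occupy bits 31..29 of the device id word:
 *   NCQ_READ_LOG_FLAG  0x80000000
 *   NCQ_ABORT_ALL_FLAG 0x40000000
 *   NCQ_2ND_RLE_FLAG   0x20000000
 * Illustrative usage: */
static void example_mark_read_log_pending(struct pm8001_device *pm8001_ha_dev)
{
	pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;		/* set */
	if (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG)	/* test */
		pm8001_ha_dev->id &= ~NCQ_2ND_RLE_FLAG;	/* clear when done */
}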
*pm8001_ha, u32 shiftValue); /* ctl shared API */ diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c new file mode 100644 index 000000000000..302514d8157b --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -0,0 +1,4130 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 PMC-Sierra, Inc., + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + #include <linux/slab.h> + #include "pm8001_sas.h" + #include "pm80xx_hwi.h" + #include "pm8001_chips.h" + #include "pm8001_ctl.h" + +#define SMP_DIRECT 1 +#define SMP_INDIRECT 2 +/** + * read_main_config_table - read the configure table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature = + pm8001_mr32(address, MAIN_SIGNATURE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev = + pm8001_mr32(address, MAIN_INTERFACE_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev = + pm8001_mr32(address, MAIN_FW_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io = + pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl = + pm8001_mr32(address, MAIN_MAX_SGL_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag = + pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset = + pm8001_mr32(address, MAIN_GST_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset = + pm8001_mr32(address, MAIN_IBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset = + pm8001_mr32(address, MAIN_OBQ_OFFSET); + + /* read Error Dump Offset and Length */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); + + /* read GPIO LED settings from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping = + pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET); + + /* read analog Setting offset from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset = + pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset = + pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset = + pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET); +} + +/** + * read_general_status_table - read the general status table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->general_stat_tbl_addr; + pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate = + pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt = + pm8001_mr32(address, GST_MSGUTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt = + pm8001_mr32(address, GST_IOPTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val = + pm8001_mr32(address, GST_GPIO_INPUT_VAL); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] = + pm8001_mr32(address, GST_RERRINFO_OFFSET0); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] = + pm8001_mr32(address, GST_RERRINFO_OFFSET1); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] = + pm8001_mr32(address, GST_RERRINFO_OFFSET2); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] = + pm8001_mr32(address, GST_RERRINFO_OFFSET3); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] = + pm8001_mr32(address, GST_RERRINFO_OFFSET4); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] = + pm8001_mr32(address, GST_RERRINFO_OFFSET5); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] = + pm8001_mr32(address, GST_RERRINFO_OFFSET6); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] = + pm8001_mr32(address, GST_RERRINFO_OFFSET7); +} +/** + * read_phy_attr_table - read the phy attribute table and save it. + * @pm8001_ha: our hba card information + */ +static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->pspa_q_tbl_addr; + pm8001_ha->phy_attr_table.phystart1_16[0] = + pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[1] = + pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[2] = + pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[3] = + pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[4] = + pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[5] = + pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[6] = + pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[7] = + pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[8] = + pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[9] = + pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[10] = + pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[11] = + pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[12] = + pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[13] = + pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[14] = + pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[15] = + pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET); + + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET); + 
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET); + +} + +/** + * read_inbnd_queue_table - read the inbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + u32 offset = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + IB_PIPCI_BAR))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET)); + } +} + +/** + * read_outbnd_queue_table - read the outbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + u32 offset = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + OB_CIPCI_BAR))); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET)); + } +} + +/** + * init_default_table_values - init the default table. 
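read_phy_attr_table above reads sixteen consecutive 32-bit registers per array with one explicit statement each. If the PSPA_* offsets are contiguous at a 4-byte stride, which their names suggest but this patch does not state, the same reads collapse into a loop; a hedged alternative:

/* Equivalent loop form, ASSUMING PSPA_PHYSTATEn_OFFSET and
 * PSPA_OB_HW_EVENT_PIDn_OFFSET are contiguous 4-byte-stride register
 * banks. Verify against the register map before relying on this. */
static void example_read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->pspa_q_tbl_addr;
	int i;

	for (i = 0; i < 16; i++) {
		pm8001_ha->phy_attr_table.phystart1_16[i] =
			pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET + i * 4);
		pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[i] =
			pm8001_mr32(address,
				    PSPA_OB_HW_EVENT_PID0_OFFSET + i * 4);
	}
}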
+ * @pm8001_ha: our hba card information + */ +static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) +{ + int i; + u32 offsetib, offsetob; + void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; + void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + + /* Disable end to end CRC checking */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); + + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = + PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); + pm8001_ha->inbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; + pm8001_ha->inbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[IB + i].total_len; + pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = + pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = + pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].ci_virt = + pm8001_ha->memoryMap.region[CI + i].virt_ptr; + offsetib = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(addressib, + (offsetib + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(addressib, (offsetib + 0x18)); + pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; + pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + } + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + pm8001_ha->outbnd_q_tbl[i].element_size_cnt = + PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); + pm8001_ha->outbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; + pm8001_ha->outbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[OB + i].total_len; + pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = + pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = + pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; + /* interrupt vector based on oq */ + pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24); + pm8001_ha->outbnd_q_tbl[i].pi_virt = + pm8001_ha->memoryMap.region[PI + i].virt_ptr; + offsetob = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(addressob, + offsetob + 0x14)); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(addressob, (offsetob + 0x18)); + pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; + pm8001_ha->outbnd_q_tbl[i].producer_index = 0; 
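The queue descriptor words built in init_default_table_values pack several fields into one u32: PM8001_MPI_QUEUE in the low bits, the 64-byte element size shifted to bit 16, and a flag at bit 30 that is 0 for the inbound tables and 1 for the outbound tables. Hedged decode helpers for that layout; the field widths are inferred from the initializers, not from a spec quoted here:

/* Assumed field layout implied by the initializers above:
 *   bits  0..15  queue entry count (PM8001_MPI_QUEUE)
 *   bits 16..29  element size in bytes (64 here)
 *   bit      30  0 in the inbound tables, 1 in the outbound tables
 * Illustrative only; check the MPI documentation before using. */
#define EXAMPLE_Q_ENTRY_COUNT(w)	((w) & 0xffff)
#define EXAMPLE_Q_ELEM_SIZE(w)		(((w) >> 16) & 0x3fff)
#define EXAMPLE_Q_DIR_BIT(w)		(((w) >> 30) & 0x1)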
+ } +} + +/** + * update_main_config_table - update the main default table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size); + pm8001_mw32(address, MAIN_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity); + pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); + pm8001_mw32(address, MAIN_EVENT_CRC_CHECK, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump); + + /* SPCv specific */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF; + /* Set GPIOLED to 0x2 for LED indicator */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000; + pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping); + + pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); + pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay); +} + +/** + * update_inbnd_queue_table - update the inbound queue table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + u16 offset = number * 0x20; + pm8001_mw32(address, offset + IB_PROPERITY_OFFSET, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); +} + +/** + * update_outbnd_queue_table - update the outbound queue table to the HBA. 
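The two gpio_led_mapping lines in update_main_config_table above are a read-modify-write of a 2-bit field: the AND with 0xCFFFFFFF clears bits 29:28, and the OR with 0x20000000 writes the field value 2, the LED-indicator mode named in the comment. The same operation in generic mask/shift form (macro names are illustrative):

/* Read-modify-write of bits 29:28, as performed above:
 *   ~0xCFFFFFFF == 0x30000000  -> the two-bit field being replaced
 *    0x20000000 == 2u << 28    -> new field value 2
 */
#define EXAMPLE_GPIO_LED_MASK	0x30000000u
#define EXAMPLE_GPIO_LED_SHIFT	28

u32 v = pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping;
v = (v & ~EXAMPLE_GPIO_LED_MASK) | (2u << EXAMPLE_GPIO_LED_SHIFT);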
+ * @pm8001_ha: our hba card information + */ +static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + u16 offset = number * 0x24; + pm8001_mw32(address, offset + OB_PROPERITY_OFFSET, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); + pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET, + pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); +} + +/** + * mpi_init_check - check firmware initialization status. + * @pm8001_ha: our hba card information + */ +static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + + /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the + table is updated */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_UPDATE; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) + return -1; + /* check the MPI-State for initialization upto 100ms*/ + max_wait_count = 100 * 1000;/* 100 msec */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + } while ((GST_MPI_STATE_INIT != + (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count)); + if (!max_wait_count) + return -1; + + /* check MPI Initialization error */ + gst_len_mpistate = gst_len_mpistate >> 16; + if (0x0000 != gst_len_mpistate) + return -1; + + return 0; +} + +/** + * check_fw_ready - The LLDD check if the FW is ready, if not, return error. 
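The doorbell handshake in mpi_init_check above, and the firmware-ready checks that follow, all share one shape: poll a register with 1 us delays until a masked value matches or a microsecond budget runs out. A hedged generic helper expressing that loop once:

/* Generic form of the polling loops used in this file (sketch only):
 * spin with 1 us delays until (reg & mask) == want, or the budget in
 * microseconds is exhausted. Returns 0 on match, -1 on timeout. */
static int example_poll_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar,
			     u32 reg, u32 mask, u32 want, u32 budget_us)
{
	do {
		udelay(1);
		if ((pm8001_cr32(pm8001_ha, bar, reg) & mask) == want)
			return 0;
	} while (--budget_us);
	return -1;
}

/* e.g. the 2 s doorbell wait above would read:
 *   example_poll_cr32(pm8001_ha, 0, MSGU_IBDB_SET,
 *                     SPCv_MSGU_CFG_TABLE_UPDATE, 0, 2 * 1000 * 1000);
 */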
+ * @pm8001_ha: our hba card information + */ +static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; + u32 max_wait_count; + u32 max_wait_time; + int ret = 0; + + /* reset / PCIe ready */ + max_wait_time = max_wait_count = 100 * 1000; /* 100 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while ((value == 0xFFFFFFFF) && (--max_wait_count)); + + /* check ila status */ + max_wait_time = max_wait_count = 1000 * 1000; /* 1000 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_ILA_READY) != + SCRATCH_PAD_ILA_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" ila ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check RAAE status */ + max_wait_time = max_wait_count = 1800 * 1000; /* 1800 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_RAAE_READY) != + SCRATCH_PAD_RAAE_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" raae ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check iop0 status */ + max_wait_time = max_wait_count = 600 * 1000; /* 600 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) && + (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" iop0 ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check iop1 status only for 16 port controllers */ + if ((pm8001_ha->chip_id != chip_8008) && + (pm8001_ha->chip_id != chip_8009)) { + /* 200 milli sec */ + max_wait_time = max_wait_count = 200 * 1000; + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_IOP1_READY) != + SCRATCH_PAD_IOP1_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "iop1 ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + } + + return ret; +} + +static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *base_addr; + u32 value; + u32 offset; + u32 pcibar; + u32 pcilogic; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */ + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n", + offset, value)); + pcilogic = (value & 0xFC000000) >> 26; + pcibar = get_pci_bar_index(pcilogic); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar)); + pm8001_ha->main_cfg_tbl_addr = base_addr = + pm8001_ha->io_mem[pcibar].memvirtaddr + offset; + pm8001_ha->general_stat_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) & + 0xFFFFFF); + pm8001_ha->inbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) & + 0xFFFFFF); + pm8001_ha->outbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) & + 0xFFFFFF); + pm8001_ha->ivt_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) & + 0xFFFFFF); + pm8001_ha->pspa_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) & + 0xFFFFFF); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GST 
OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("INBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("OBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("IVT OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PSPA OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x90))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - main cfg %p general status %p\n", + pm8001_ha->main_cfg_tbl_addr, + pm8001_ha->general_stat_tbl_addr)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - inbnd %p obnd %p\n", + pm8001_ha->inbnd_q_tbl_addr, + pm8001_ha->outbnd_q_tbl_addr)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - pspa %p ivt %p\n", + pm8001_ha->pspa_q_tbl_addr, + pm8001_ha->ivt_tbl_addr)); +} + +/** + * pm80xx_set_thermal_config - support the thermal configuration + * @pm8001_ha: our hba card information. + */ +int +pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + struct inbound_queue_table *circularQ; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) | + (THERMAL_ENABLE << 8) | THERMAL_OP_CODE; + payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return rc; + +} + +/** +* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol +* Timer configuration page +* @pm8001_ha: our hba card information. 
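init_pci_device_addresses above splits MSGU_SCRATCH_PAD_0 into a 26-bit table offset and a 6-bit logical BAR number, then locates the remaining tables through the 24-bit offsets stored at +0x18 through +0x90 of the main table. The decode spelled out, with an illustrative value:

/* Decode of scratch pad 0 as done above. For value = 0xF0001000:
 *   offset   = value & 0x03FFFFFF          -> 0x00001000
 *   pcilogic = (value & 0xFC000000) >> 26  -> 0x3C
 */
static void example_decode_spad0(u32 value, u32 *offset, u32 *pcibar)
{
	*offset = value & 0x03FFFFFF;			/* 26-bit table offset */
	*pcibar = get_pci_bar_index((value & 0xFC000000) >> 26);
}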
+*/ +static int +pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + struct inbound_queue_table *circularQ; + SASProtocolTimerConfig_t SASConfigPage; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_tag_alloc(pm8001_ha, &tag); + + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + + SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE; + SASConfigPage.MST_MSI = 3 << 15; + SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO; + SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) | + (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER; + SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME; + + if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF) + SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF; + + + SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) | + SAS_OPNRJT_RTRY_INTVL; + SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16) + | SAS_COPNRJT_RTRY_TMO; + SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16) + | SAS_COPNRJT_RTRY_THR; + SASConfigPage.MAX_AIP = SAS_MAX_AIP; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.pageCode " + "0x%08x\n", SASConfigPage.pageCode)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.MST_MSI " + " 0x%08x\n", SASConfigPage.MST_MSI)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO " + " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_FRM_TMO " + " 0x%08x\n", SASConfigPage.STP_FRM_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_IDLE_TMO " + " 0x%08x\n", SASConfigPage.STP_IDLE_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL " + " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO " + " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR " + " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR)); + PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP " + " 0x%08x\n", SASConfigPage.MAX_AIP)); + + memcpy(&payload.cfg_pg, &SASConfigPage, + sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm80xx_get_encrypt_info - Check for encryption + * @pm8001_ha: our hba card information. 
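pm80xx_set_thermal_config above and pm80xx_set_sas_protocol_timer_config below it follow the same recipe: allocate a tag, fill a set_ctrl_cfg_req whose first payload word carries the page code, and post it with pm8001_mpi_build_cmd on inbound queue 0. Condensed into one hedged skeleton; real callers must fill cfg_pg[] per the specific page layout:

/* Skeleton of the OPC_INB_SET_CONTROLLER_CONFIG flow used twice above
 * (illustrative helper, not driver code). */
static int example_set_ctrl_cfg_page(struct pm8001_hba_info *pm8001_ha,
				     u32 page_code, u32 word0, u32 word1)
{
	struct set_ctrl_cfg_req payload;
	struct inbound_queue_table *circularQ = &pm8001_ha->inbnd_q_tbl[0];
	u32 tag;
	int rc;

	memset(&payload, 0, sizeof(payload));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return -1;
	payload.tag = cpu_to_le32(tag);
	payload.cfg_pg[0] = page_code | word0;	/* page code in the low byte */
	payload.cfg_pg[1] = word1;
	return pm8001_mpi_build_cmd(pm8001_ha, circularQ,
				    OPC_INB_SET_CONTROLLER_CONFIG,
				    &payload, 0);
}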
+ */ +static int +pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) +{ + u32 scratch3_value; + int ret; + + /* Read encryption status from SCRATCH PAD 3 */ + scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + + if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_READY) { + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + pm8001_ha->encrypt_info.status = 0; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X." + "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) == + SCRATCH_PAD3_ENC_DISABLED) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n", + scratch3_value)); + pm8001_ha->encrypt_info.status = 0xFFFFFFFF; + pm8001_ha->encrypt_info.cipher_mode = 0; + pm8001_ha->encrypt_info.sec_mode = 0; + return 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_DIS_ERR) { + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X." + "Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = -1; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_ENA_ERR) { + + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X." + "Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = -1; + } + return ret; +} + +/** + * pm80xx_encrypt_update - update flash with encryption informtion + * @pm8001_ha: our hba card information. 
+ */ +static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) +{ + struct kek_mgmt_req payload; + struct inbound_queue_table *circularQ; + int rc; + u32 tag; + u32 opc = OPC_INB_KEK_MANAGEMENT; + + memset(&payload, 0, sizeof(struct kek_mgmt_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + /* Currently only one key is used. New KEK index is 1. + * Current KEK index is 1. Store KEK to NVRAM is 1. + */ + payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) | + KEK_MGMT_SUBOP_KEYCARDUPDATE); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm8001_chip_init - the main init function that initialize whole PM8001 chip. + * @pm8001_ha: our hba card information + */ +static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) +{ + int ret; + u8 i = 0; + + /* check the firmware status */ + if (-1 == check_fw_ready(pm8001_ha)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Firmware is not ready!\n")); + return -EBUSY; + } + + /* Initialize pci space address eg: mpi offset */ + init_pci_device_addresses(pm8001_ha); + init_default_table_values(pm8001_ha); + read_main_config_table(pm8001_ha); + read_general_status_table(pm8001_ha); + read_inbnd_queue_table(pm8001_ha); + read_outbnd_queue_table(pm8001_ha); + read_phy_attr_table(pm8001_ha); + + /* update main config table ,inbound table and outbound table */ + update_main_config_table(pm8001_ha); + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) + update_inbnd_queue_table(pm8001_ha, i); + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) + update_outbnd_queue_table(pm8001_ha, i); + + /* notify firmware update finished and check initialization status */ + if (0 == mpi_init_check(pm8001_ha)) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MPI initialize successful!\n")); + } else + return -EBUSY; + + /* send SAS protocol timer configuration page to FW */ + ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha); + + /* Check for encryption */ + if (pm8001_ha->chip->encrypt) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Checking for encryption\n")); + ret = pm80xx_get_encrypt_info(pm8001_ha); + if (ret == -1) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Encryption error !!\n")); + if (pm8001_ha->encrypt_info.status == 0x81) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled with error." 
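The new_curidx_ksop word built in pm80xx_encrypt_update above packs four byte-wide fields, exactly as its in-code comment spells out: new KEK index 1, current KEK index 1, store-to-NVRAM 1, and the sub-operation in the low byte. An illustrative builder for that layout:

/* Byte layout implied by the comment in pm80xx_encrypt_update:
 *   bits 31..24  new KEK index       (1)
 *   bits 23..16  current KEK index   (1)
 *   bits 15..8   store KEK to NVRAM  (1)
 *   bits  7..0   sub-operation       (KEK_MGMT_SUBOP_KEYCARDUPDATE)
 */
static u32 example_kek_ksop(u8 new_idx, u8 cur_idx, u8 store, u8 subop)
{
	return ((u32)new_idx << 24) | ((u32)cur_idx << 16) |
	       ((u32)store << 8) | subop;
}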
+ "Saving encryption key to flash\n")); + pm80xx_encrypt_update(pm8001_ha); + } + } + } + return 0; +} + +static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + init_pci_device_addresses(pm8001_ha); + /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the + table is stop */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET); + + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_RESET; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:IBDB value/=%x\n", value)); + return -1; + } + + /* check the MPI-State for termination in progress */ + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_UNINIT == + (gst_len_mpistate & GST_MPI_STATE_MASK)) + break; + } while (--max_wait_count); + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TIME OUT MPI State = 0x%x\n", + gst_len_mpistate & GST_MPI_STATE_MASK)); + return -1; + } + + return 0; +} + +/** + * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all + * the FW register status to the originated status. + * @pm8001_ha: our hba card information + */ + +static int +pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 regval; + u32 bootloader_state; + + /* Check if MPI is in ready state to reset */ + if (mpi_uninit_check(pm8001_ha) != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MPI state is not ready\n")); + return -1; + } + + /* checked for reset register normal state; 0x0 */ + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("reset register before write : 0x%x\n", regval)); + + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE); + mdelay(500); + + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("reset register after write 0x%x\n", regval)); + + if ((regval & SPCv_SOFT_RESET_READ_MASK) == + SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" soft reset successful [regval: 0x%x]\n", + regval)); + } else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" soft reset failed [regval: 0x%x]\n", + regval)); + + /* check bootloader is successfully executed or in HDA mode */ + bootloader_state = + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & + SCRATCH_PAD1_BOOTSTATE_MASK; + + if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode SEEPROM\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode Bootstrap Pin\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode soft reset\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state-HDA mode critical error\n")); + } + return -EBUSY; + } + + /* check the firmware status after reset */ + if (-1 == 
check_fw_ready(pm8001_ha)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Firmware is not ready!\n")); + return -EBUSY; + } + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SPCv soft reset Complete\n")); + return 0; +} + +static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 i; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip reset start\n")); + + /* do SPCv chip reset. */ + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SPC soft reset Complete\n")); + + /* Check this ..whether delay is required or no */ + /* delay 10 usec */ + udelay(10); + + /* wait for 20 msec until the firmware gets reloaded */ + i = 20; + do { + mdelay(1); + } while ((--i) != 0); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip reset finished\n")); +} + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); +} + +/** + * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL); +} + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + u32 mask; + mask = (u32)(1 << vec); + + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF)); + return; +#endif + pm80xx_chip_intx_interrupt_enable(pm8001_ha); + +} + +/** + * pm8001_chip_interrupt_disable- disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + u32 mask; + if (vec == 0xFF) + mask = 0xFFFFFFFF; + else + mask = (u32)(1 << vec); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF)); + return; +#endif + pm80xx_chip_intx_interrupt_disable(pm8001_ha); +} + +static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct task_abort_req task_abort; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_ABORT; + int ret; + + if (!pm8001_ha_dev) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n")); + return; + } + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot " + "allocate task\n")); + return; + } + + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return; + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&task_abort, 0, sizeof(task_abort)); + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + task_abort.tag = cpu_to_le32(ccb_tag); + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + +} + +static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + struct sata_start_req sata_cmd; + int res; + u32 
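pm80xx_chip_interrupt_enable and _disable above drive one outbound doorbell mask bit per MSI-X vector, with vec == 0xFF meaning "all vectors"; writing the bit to MSGU_ODMR_CLR unmasks it, writing it to MSGU_ODMR masks it. The two paths folded into one hedged helper:

/* Per-vector masking as implemented above (sketch only): each outbound
 * queue's interrupt can be throttled independently while its handler
 * drains the queue. */
static void example_mask_vec(struct pm8001_hba_info *pm8001_ha, u8 vec,
			     bool unmask)
{
	u32 mask = (vec == 0xFF) ? 0xFFFFFFFF : (u32)(1 << vec);

	if (unmask)
		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, mask);	/* enable */
	else
		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, mask);	/* disable */
}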
ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct host_to_dev_fis fis; + struct domain_device *dev; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate task !!!\n")); + return; + } + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate tag !!!\n")); + return; + } + + /* allocate domain device by ourselves as libsas + * is not going to provide any + */ + dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); + if (!dev) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Domain device cannot be allocated\n")); + sas_free_task(task); + return; + } else { + task->dev = dev; + task->dev->lldd_dev = pm8001_ha_dev; + } + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; + pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* construct read log FIS */ + memset(&fis, 0, sizeof(struct host_to_dev_fis)); + fis.fis_type = 0x27; + fis.flags = 0x80; + fis.command = ATA_CMD_READ_LOG_EXT; + fis.lbal = 0x10; + fis.sector_count = 0x1; + + sata_cmd.tag = cpu_to_le32(ccb_tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9)); + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); + + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + +} + +/** + * mpi_ssp_completion - process the event with which the FW responds to an SSP request. + * @pm8001_ha: our hba card information + * @piomb: the message contents of this outbound message. + + * When the FW has completed an SSP request, for example an IO request, and has + * filled the SG buffers with the data, it triggers this event to signal that it + * has finished the job; please check the corresponding buffer. + * We then tell the caller, who may be waiting for the result, so the upper + * layer learns that the task has been finished.
+ */ +static void +mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 param; + u32 tag; + struct ssp_completion_resp *psspPayload; + struct task_status_struct *ts; + struct ssp_response_iu *iu; + struct pm8001_device *pm8001_dev; + psspPayload = (struct ssp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psspPayload->status); + tag = le32_to_cpu(psspPayload->tag); + ccb = &pm8001_ha->ccb_info[tag]; + if ((status == IO_ABORTED) && ccb->open_retry) { + /* Being completed by another */ + ccb->open_retry = 0; + return; + } + pm8001_dev = ccb->device; + param = le32_to_cpu(psspPayload->param); + t = ccb->task; + + if (status && status != IO_UNDERFLOW) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SUCCESS ,param = 0x%x\n", + param)); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + } else { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + iu = &psspPayload->ssp_resp_iu; + sas_ssp_task_response(pm8001_ha->dev, t, iu); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_UNDERFLOW: + /* SSP Completion with error */ + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n", + param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + /* Force the midlayer to retry */ + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + 
pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_DS_NON_OPERATIONAL); + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_TM_TAG_NOT_FOUND: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_TM_TAG_NOT_FOUND\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_SSP_EXT_IU_ZERO_LEN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed case. 
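The FW returned a status word outside the documented SSP completion codes.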
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + } + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("scsi_status = 0x%x\n", + psspPayload->ssp_resp_iu.status)); + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with io_status 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + unsigned long flags; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct ssp_event_resp *psspPayload = + (struct ssp_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psspPayload->event); + u32 tag = le32_to_cpu(psspPayload->tag); + u32 port_id = le32_to_cpu(psspPayload->port_id); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", event)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK); + return; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; +
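/* an IT nexus loss may be transient: report it as an open reject so the midlayer can retry; for driver-internal commands (!uldd_task) also kick pm8001_handle_event() for recovery */ +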
ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); + return; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + return; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with event 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + u32 param; + u32 status; + u32 tag; + struct sata_completion_resp *psataPayload; + struct task_status_struct *ts; + struct ata_task_resp *resp ; + u32 *sata_resp; + struct pm8001_device *pm8001_dev; + unsigned long flags; + + psataPayload = (struct sata_completion_resp *)(piomb + 4); + status = le32_to_cpu(psataPayload->status); + tag = le32_to_cpu(psataPayload->tag); + + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("tag null\n")); + return; + } + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psataPayload->param); + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ccb null\n")); + return; + } + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task null\n")); + return; + } + + if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) + && unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + + ts = &t->task_status; + if (!ts) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ts null\n")); + return; + } + + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + /* check if response is for SEND READ LOG */ + if (pm8001_dev && + (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { + /* set new bit for abort_all */ + pm8001_dev->id |= NCQ_ABORT_ALL_FLAG; + /* clear bit for read log */ + pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF; + pm80xx_send_abort_all(pm8001_ha, pm8001_dev); + /* Free the tag */ + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + return; + } + } else { + u8 len; + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SAS_PROTO_RESPONSE len = %d\n", + param)); + sata_resp = &psataPayload->sata_resp[0]; + resp = (struct ata_task_resp *)ts->buf; + if (t->ata_task.dma_xfer == 0 && + t->data_dir == PCI_DMA_FROMDEVICE) { + len = sizeof(struct pio_setup_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("PIO read len = %d\n", len)); + } else if (t->ata_task.use_ncq) { + len = sizeof(struct set_dev_bits_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("FPDMA len = %d\n", len)); + } else { + len = sizeof(struct dev_to_host_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("other len = %d\n", len)); + } + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + 
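/* return the raw ending FIS to libsas together with its length */ +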
resp->frame_len = len; + memcpy(&resp->ending_fis[0], sata_resp, len); + ts->buf_valid_size = sizeof(*resp); + } else + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("response too large\n")); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + /* the following cases are TODO cases */ + case IO_UNDERFLOW: + /* SATA Completion with error */ + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_UNDERFLOW param = %d\n", param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*in order to force CPU ordering*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( +
"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_XFER_ERROR_SATA_LINK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_NON_OPERATIONAL); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_IN_ERROR); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p done with io_status 0x%x" + " resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct sata_event_resp *psataPayload = + (struct sata_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psataPayload->event); + u32 tag = le32_to_cpu(psataPayload->tag); + u32 port_id = le32_to_cpu(psataPayload->port_id); + u32 dev_id = le32_to_cpu(psataPayload->device_id); + unsigned long flags; + + ccb = &pm8001_ha->ccb_info[tag]; + + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("No CCB !!!.
returning\n")); + return; + } + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SATA EVENT 0x%x\n", event)); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension */ + if (pm8001_dev) + pm80xx_send_read_log(pm8001_ha, pm8001_dev); + return; + } + + if (unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason =
SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_PEER_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + break; + case IO_XFER_PIO_SETUP_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_DMA_ACTIVATE_TIMEOUT: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p done with io_status 0x%x" + " resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 param, i; + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 tag; + struct smp_completion_resp *psmpPayload; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + char *pdma_respaddr = NULL; + + psmpPayload = (struct smp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psmpPayload->status); + tag = le32_to_cpu(psmpPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psmpPayload->param); + t = ccb->task; + ts = &t->task_status; + pm8001_dev = ccb->device; + if (status) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("smp IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + + switch (status) { + + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + if (pm8001_dev) + pm8001_dev->running_req--; + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("DIRECT RESPONSE Length:%d\n", + param)); + pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64 + ((u64)sg_dma_address + (&t->smp_task.smp_resp)))); + for (i = 0; i < param; i++) { + *(pdma_respaddr+i) = psmpPayload->_r_a[i]; + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "SMP Byte%d DMA data 0x%x psmp 0x%x\n", + i, *(pdma_respaddr+i), + psmpPayload->_r_a[i])); + } + } + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case IO_ERROR_HW_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_HW_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case
IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk(\ + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_RX_FRAME: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_RX_FRAME\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_ERROR_INTERNAL_SMP_RESOURCE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + /* not allowed case. Therefore, return failed status */ + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with io_status 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/** + * pm80xx_hw_event_ack_req - for PM80xx, some events need to be acknowledged to the FW. + * @pm8001_ha: our hba card information + * @Qnum: the outbound queue message number. + * @SEA: source of event to ack + * @port_id: port id. + * @phyId: phy id. + * @param0: parameter 0. + * @param1: parameter 1. + */ +static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) +{ + struct hw_event_ack_req payload; + u32 opc = OPC_INB_SAS_HW_EVENT_ACK; + + struct inbound_queue_table *circularQ; + + memset((u8 *)&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[Qnum]; + payload.tag = cpu_to_le32(1); + payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | + ((phyId & 0xFF) << 24) | (port_id & 0xFF)); + payload.param0 = cpu_to_le32(param0); + payload.param1 = cpu_to_le32(param1); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); +} + +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op); + +/** + * hw_event_sas_phy_up - FW tells me a SAS phy up event.
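 + * (Parses link rate, port id, phy id and the attached device type from + * the IOMB, then brings the phy and port up in libsas accordingly.)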
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + u8 deviceType = pPayload->sas_identify.dev_type; + port->port_state = portstate; + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "portid:%d; phyid:%d; linkrate:%d; " + "portstate:%x; devicetype:%x\n", + port_id, phy_id, link_rate, portstate, deviceType)); + + switch (deviceType) { + case SAS_PHY_UNUSED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("device type no device.\n")); + break; + case SAS_END_DEVICE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n")); + pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_EDGE_EXPANDER_DEVICE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("expander device.\n")); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("fanout expander device.\n")); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unknown device type(%x)\n", deviceType)); + break; + } + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = deviceType; + phy->phy_attached = 1; + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; + phy->sas_phy.oob_mode = SAS_OOB_MODE; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, &pPayload->sas_identify, + sizeof(struct sas_identify_frame)-4); + phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + if (pm8001_ha->flags == PM8001F_RUN_TIME) + mdelay(200);/*delay a moment to wait disk to spinup*/ + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_sata_phy_up -FW tells me a SATA phy up event. 
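 + * (Records the initial D2H FIS as the phy's received frame and signals + * OOB completion in SATA mode to libsas.)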
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "port id %d, phy id %d link_rate %d portstate 0x%x\n", + port_id, phy_id, link_rate, portstate)); + + port->port_state = portstate; + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + phy->phy_type |= PORT_TYPE_SATA; + phy->phy_attached = 1; + phy->sas_phy.oob_mode = SATA_OOB_MODE; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4), + sizeof(struct dev_to_host_fis)); + phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); + phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + phy->identify.device_type = SAS_SATA_DEV; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_phy_down -we should notify the libsas the phy is down. 
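 + * (Clears the phy and port state; PORT_INVALID and PORT_LOSTCOMM are + * additionally acked back to the FW.)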
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + port->port_state = portstate; + phy->phy_type = 0; + phy->identify.device_type = 0; + phy->phy_attached = 0; + memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE); + switch (portstate) { + case PORT_VALID: + break; + case PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" PortInvalid portID %d\n", port_id)); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Last phy Down and port invalid\n")); + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + case PORT_IN_RESET: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Port In Reset portID %d\n", port_id)); + break; + case PORT_NOT_ESTABLISHED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n")); + port->port_attached = 0; + break; + case PORT_LOSTCOMM: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and PORT_LOSTCOMM\n")); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Last phy Down and port invalid\n")); + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + default: + port->port_attached = 0; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and(default) = 0x%x\n", + portstate)); + break; + + } +} + +static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_start_resp *pPayload = + (struct phy_start_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phy_id = + le32_to_cpu(pPayload->phyid); + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n", + status, phy_id)); + if (status == 0) { + phy->phy_state = 1; + if (pm8001_ha->flags == PM8001F_RUN_TIME) + complete(phy->enable_completion); + } + return 0; + +} + +/** + * mpi_thermal_hw_event -The hw event has come. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct thermal_hw_event *pPayload = + (struct thermal_hw_event *)(piomb + 4); + + u32 thermal_event = le32_to_cpu(pPayload->thermal_event); + u32 rht_lht = le32_to_cpu(pPayload->rht_lht); + + if (thermal_event & 0x40) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Local high temperature violated!\n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Measured local high temperature %d\n", + ((rht_lht & 0xFF00) >> 8))); + } + if (thermal_event & 0x10) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Remote high temperature violated!\n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Measured remote high temperature %d\n", + ((rht_lht & 0xFF000000) >> 24))); + } + return 0; +} + +/** + * mpi_hw_event -The hw event has come. 
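 + * (Demultiplexes the HW event IOMB by event type and forwards it to the + * phy up/down helpers above or to the matching libsas notification.)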
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + unsigned long flags; + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u16 eventType = + (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8); + u8 status = + (u8)((lr_status_evt_portid & 0x0F000000) >> 24); + + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n", + port_id, phy_id, eventType, status)); + + switch (eventType) { + + case HW_EVENT_SAS_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_START_STATUS\n")); + hw_event_sas_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_PHY_UP\n")); + hw_event_sata_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_SPINUP_HOLD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); + break; + case HW_EVENT_PHY_DOWN: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_DOWN\n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); + phy->phy_attached = 0; + phy->phy_state = 0; + hw_event_phy_down(pm8001_ha, piomb); + break; + case HW_EVENT_PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_INVALID\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + /* the broadcast change primitive received, tell the LIBSAS this event + to revalidate the sas domain*/ + case HW_EVENT_BROADCAST_CHANGE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, + port_id, phy_id, 1, 0); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_PHY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_ERROR\n")); + sas_phy_disconnected(&phy->sas_phy); + phy->phy_attached = 0; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR); + break; + case HW_EVENT_BROADCAST_EXP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_EXP\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_LINK_ERR_INVALID_DWORD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_DISPARITY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 
0, + HW_EVENT_LINK_ERR_DISPARITY_ERROR, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_CODE_VIOLATION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_CODE_VIOLATION, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_MALFUNCTION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_MALFUNCTION\n")); + break; + case HW_EVENT_BROADCAST_SES: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_SES\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_INBOUND_CRC_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_INBOUND_CRC_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_HARD_RESET_RECEIVED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n")); + sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET); + break; + case HW_EVENT_ID_FRAME_TIMEOUT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_PHY_RESET_FAILED, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RESET_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVERY_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_PORT_RECOVERY_TIMER_TMO, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVER: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVER\n")); + break; + case HW_EVENT_PORT_RESET_COMPLETE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n")); + break; + case EVENT_BROADCAST_ASYNCH_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n")); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Unknown event type 0x%x\n", eventType)); + break; + } + return 0; +} + +/** + * mpi_phy_stop_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message 
buffer + */ +static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_stop_resp *pPayload = + (struct phy_stop_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phyid = + le32_to_cpu(pPayload->phyid); + struct pm8001_phy *phy = &pm8001_ha->phy[phyid]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("phy:0x%x status:0x%x\n", + phyid, status)); + if (status == 0) + phy->phy_state = 0; + return 0; +} + +/** + * mpi_set_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct set_ctrl_cfg_resp *pPayload = + (struct set_ctrl_cfg_resp *)(piomb + 4); + u32 status = le32_to_cpu(pPayload->status); + u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", + status, err_qlfr_pgcd)); + + return 0; +} + +/** + * mpi_get_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_get_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_flash_op_ext_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_set_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_kek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4); + + u32 status = le32_to_cpu(pPayload->status); + u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop); + u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "KEK MGMT RESP. 
Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n", + status, kidx_new_curr_ksop, err_qlfr)); + + return 0; +} + +/** + * mpi_dek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * ssp_coalesced_comp_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * process_one_iomb - process one outbound Queue memory block + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + __le32 pHeader = *(__le32 *)piomb; + u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF); + + switch (opc) { + case OPC_OUB_ECHO: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n")); + break; + case OPC_OUB_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_HW_EVENT\n")); + mpi_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_THERM_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_THERM_HW_EVENT\n")); + mpi_thermal_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_COMP\n")); + mpi_ssp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SMP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_COMP\n")); + mpi_smp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_LOCAL_PHY_CNTRL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_REGIST: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_REGIST\n")); + pm8001_mpi_reg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEREG_DEV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unregister the device\n")); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEV_HANDLE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n")); + break; + case OPC_OUB_SATA_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_COMP\n")); + mpi_sata_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_EVENT\n")); + mpi_sata_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_EVENT\n")); + mpi_ssp_event(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_HANDLE_ARRIV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n")); + /*This is for target*/ + break; + case OPC_OUB_SSP_RECV_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n")); + /*This is for target*/ + break; + case OPC_OUB_FW_FLASH_UPDATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GPIO_RESPONSE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_RESPONSE\n")); + break; + case OPC_OUB_GPIO_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_EVENT\n")); + break; + case OPC_OUB_GENERAL_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); + pm8001_mpi_general_event(pm8001_ha, piomb);
+ break; + case OPC_OUB_SSP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SAS_DIAG_MODE_START_END: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n")); + break; + case OPC_OUB_SAS_DIAG_EXECUTE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n")); + break; + case OPC_OUB_GET_TIME_STAMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_TIME_STAMP\n")); + break; + case OPC_OUB_SAS_HW_EVENT_ACK: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n")); + break; + case OPC_OUB_PORT_CONTROL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_PORT_CONTROL\n")); + break; + case OPC_OUB_SMP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEVICE_HANDLE_REMOVAL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n")); + break; + case OPC_OUB_SET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n")); + break; + case OPC_OUB_SET_DEV_INFO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEV_INFO\n")); + break; + /* spcv specific commands */ + case OPC_OUB_PHY_START_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_PHY_START_RESP opcode:%x\n", opc)); + mpi_phy_start_resp(pm8001_ha, piomb); + break; + case OPC_OUB_PHY_STOP_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc)); + mpi_phy_stop_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_CONTROLLER_CONFIG: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc)); + mpi_set_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_CONTROLLER_CONFIG: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc)); + mpi_get_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_PHY_PROFILE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc)); + mpi_get_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_FLASH_OP_EXT: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc)); + mpi_flash_op_ext_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_PHY_PROFILE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc)); + mpi_set_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_KEK_MANAGEMENT_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc)); + mpi_kek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEK_MANAGEMENT_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc)); +
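/* Editor's note on the handler called next: mpi_dek_management_resp()
 * above is still only a logging stub in this patch; the DEK IOMB
 * (layout: struct dek_mgmt_resp in pm80xx_hwi.h) is consumed without
 * being decoded.
 */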
mpi_dek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COALESCED_COMP_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc)); + ssp_coalesced_comp_resp(pm8001_ha, piomb); + break; + default: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Unknown outbound Queue IOMB OPC = 0x%x\n", opc)); + break; + } +} + +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + struct outbound_queue_table *circularQ; + void *pMsg1 = NULL; + u8 uninitialized_var(bc); + u32 ret = MPI_IO_STATUS_FAIL; + unsigned long flags; + + spin_lock_irqsave(&pm8001_ha->lock, flags); + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; + do { + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + if (MPI_IO_STATUS_SUCCESS == ret) { + /* process the outbound message */ + process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); + /* free the message from the outbound circular buffer */ + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); + } + if (MPI_IO_STATUS_BUSY == ret) { + /* Update the producer index from SPC */ + circularQ->producer_index = + cpu_to_le32(pm8001_read_32(circularQ->pi_virt)); + if (le32_to_cpu(circularQ->producer_index) == + circularQ->consumer_idx) + /* OQ is empty */ + break; + } + } while (1); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return ret; +} + +/* PCI_DMA_... to our direction translation. */ +static const u8 data_dir_flags[] = { + [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */ + [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */ + [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ + [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ +}; + +static void build_smp_cmd(u32 deviceID, __le32 hTag, + struct smp_req *psmp_cmd, int mode, int length) +{ + psmp_cmd->tag = hTag; + psmp_cmd->device_id = cpu_to_le32(deviceID); + if (mode == SMP_DIRECT) { + length = length - 4; /* subtract crc */ + psmp_cmd->len_ip_ir = cpu_to_le32(length << 16); + } else { + psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); + } +} + +/** + * pm80xx_chip_smp_req - send a SMP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used.
+ */ +static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + int elem, rc; + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len; + struct smp_req smp_cmd; + u32 opc; + struct inbound_queue_table *circularQ; + char *preq_dma_addr = NULL; + __le64 tmp_addr; + u32 i, length; + + memset(&smp_cmd, 0, sizeof(smp_cmd)); + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + opc = OPC_INB_SMP_REQUEST; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); + + length = sg_req->length; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP Frame Length %d\n", sg_req->length)); + if (!(length - 8)) + pm8001_ha->smp_exp_mode = SMP_DIRECT; + else + pm8001_ha->smp_exp_mode = SMP_INDIRECT; + + /* DIRECT MODE support only in spcv/ve */ + pm8001_ha->smp_exp_mode = SMP_DIRECT; + + tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + preq_dma_addr = (char *)phys_to_virt(tmp_addr); + + /* INDIRECT MODE command settings. Use DMA */ + if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP REQUEST INDIRECT MODE\n")); + /* for SPCv indirect mode. Place the top 4 bytes of + * SMP Request header here. 
*/ + for (i = 0; i < 4; i++) + smp_cmd.smp_req16[i] = *(preq_dma_addr + i); + /* exclude top 4 bytes for SMP req header */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req) - 4); + /* exclude 4 bytes for SMP req header and CRC */ + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32((u32)sg_dma_len + (&task->smp_task.smp_resp)-4); + } else { /* DIRECT MODE */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req)); + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32 + ((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + } + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP REQUEST DIRECT MODE\n")); + for (i = 0; i < length; i++) + if (i < 16) { + smp_cmd.smp_req16[i] = *(preq_dma_addr+i); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req16[i], + *(preq_dma_addr))); + } else { + smp_cmd.smp_req[i] = *(preq_dma_addr+i); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req[i], + *(preq_dma_addr))); + } + } + + build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, + &smp_cmd, pm8001_ha->smp_exp_mode, length); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); + return 0; + +err_out_2: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +err_out: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + return rc; +} + +static int check_enc_sas_cmd(struct sas_task *task) +{ + if ((task->ssp_task.cdb[0] == READ_10) + || (task->ssp_task.cdb[0] == WRITE_10) + || (task->ssp_task.cdb[0] == WRITE_VERIFY)) + return 1; + else + return 0; +} + +static int check_enc_sat_cmd(struct sas_task *task) +{ + int ret = 0; + switch (task->ata_task.fis.command) { + case ATA_CMD_FPDMA_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ: + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + ret = 1; + break; + default: + ret = 0; + break; + } + return ret; +} + +/** + * pm80xx_chip_ssp_io_req - send a SSP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
+ */ +static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct ssp_ini_io_start_req ssp_cmd; + u32 tag = ccb->ccb_tag; + int ret; + u64 phys_addr; + struct inbound_queue_table *circularQ; + static u32 inb; + static u32 outb; + u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + /* data address domain added for spcv; set to 0 by host, + * used internally by controller + * 0 for SAS 1.1 and SAS 2.0 compatible TLR + */ + ssp_cmd.dad_dir_m_tlr = + cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0); + ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); + ssp_cmd.tag = cpu_to_le32(tag); + if (task->ssp_task.enable_first_burst) + ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled.Sending Encrypt SAS command 0x%x\n", + task->ssp_task.cdb[0])); + opc = OPC_INB_SSP_INI_DIF_ENC_IO; + /* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/ + ssp_cmd.dad_dir_m_tlr = cpu_to_le32 + ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.enc_addr_low = 0; + ssp_cmd.enc_addr_high = 0; + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + ssp_cmd.key_cmode = 0x6 << 4; + /* set tweak values. 
Should be the start lba */ + ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) | + (task->ssp_task.cdb[3] << 16) | + (task->ssp_task.cdb[4] << 8) | + (task->ssp_task.cdb[5])); + } else { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Sending Normal SAS command 0x%x inb q %x\n", + task->ssp_task.cdb[0], inb)); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.addr_low = 0; + ssp_cmd.addr_high = 0; + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } + } + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++); + + /* rotate the outb queue */ + outb = outb%PM8001_MAX_SPCV_OUTB_NUM; + + return ret; +} + +static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + u32 tag = ccb->ccb_tag; + int ret; + static u32 inb; + static u32 outb; + struct sata_start_req sata_cmd; + u32 hdr_tag, ncg_tag = 0; + u64 phys_addr; + u32 ATAP = 0x0; + u32 dir; + struct inbound_queue_table *circularQ; + unsigned long flags; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + if (task->data_dir == PCI_DMA_NONE) { + ATAP = 0x04; /* no data*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n")); + } else if (likely(!task->ata_task.device_control_reg_update)) { + if (task->ata_task.dma_xfer) { + ATAP = 0x06; /* DMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n")); + } else { + ATAP = 0x05; /* PIO*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); + } + if (task->ata_task.use_ncq && + dev->sata_dev.command_set != ATAPI_COMMAND_SET) { + ATAP = 0x07; /* FPDMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); + } + } + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + ncg_tag = hdr_tag; + } + dir = data_dir_flags[task->data_dir] << 8; + sata_cmd.tag = cpu_to_le32(tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); + + sata_cmd.sata_fis = task->ata_task.fis; + if (likely(!task->ata_task.device_control_reg_update)) + sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ + sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n", + sata_cmd.sata_fis.command)); + opc = OPC_INB_SATA_DIF_ENC_IO; + + /* set encryption bit */ + sata_cmd.ncqtag_atap_dir_m_dad = + cpu_to_le32(((ncg_tag & 0xff)<<16)| + ((ATAP & 0x3f) << 10) | 0x20 | dir); + /* dad (bit 0-1) is 0 */ + /* 
fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + sata_cmd.enc_addr_low = lower_32_bits(phys_addr); + sata_cmd.enc_addr_high = upper_32_bits(phys_addr); + sata_cmd.enc_esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.enc_addr_low = lower_32_bits(dma_addr); + sata_cmd.enc_addr_high = upper_32_bits(dma_addr); + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } else if (task->num_scatter == 0) { + sata_cmd.enc_addr_low = 0; + sata_cmd.enc_addr_high = 0; + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + sata_cmd.key_index_mode = 0x6 << 4; + /* set tweak values. Should be the start lba */ + sata_cmd.twk_val0 = + cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | + (sata_cmd.sata_fis.lbah << 16) | + (sata_cmd.sata_fis.lbam << 8) | + (sata_cmd.sata_fis.lbal)); + sata_cmd.twk_val1 = + cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) | + (sata_cmd.sata_fis.lbam_exp)); + } else { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Sending Normal SATA command 0x%x inb %x\n", + sata_cmd.sata_fis.command, inb)); + /* dad (bit 0-1) is 0 */ + sata_cmd.ncqtag_atap_dir_m_dad = + cpu_to_le32(((ncg_tag & 0xff)<<16) | + ((ATAP & 0x3f) << 10) | dir); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.addr_low = lower_32_bits(dma_addr); + sata_cmd.addr_high = upper_32_bits(dma_addr); + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + sata_cmd.addr_low = 0; + sata_cmd.addr_high = 0; + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } + /* scsi cdb */ + sata_cmd.atapi_scsi_cdb[0] = + cpu_to_le32(((task->ata_task.atapi_packet[0]) | + (task->ata_task.atapi_packet[1] << 8) | + (task->ata_task.atapi_packet[2] << 16) | + (task->ata_task.atapi_packet[3] << 24))); + sata_cmd.atapi_scsi_cdb[1] = + cpu_to_le32(((task->ata_task.atapi_packet[4]) | + (task->ata_task.atapi_packet[5] << 8) | + (task->ata_task.atapi_packet[6] << 16) | + (task->ata_task.atapi_packet[7] << 24))); + sata_cmd.atapi_scsi_cdb[2] = + cpu_to_le32(((task->ata_task.atapi_packet[8]) | + (task->ata_task.atapi_packet[9] << 8) | + (task->ata_task.atapi_packet[10] << 16) | + (task->ata_task.atapi_packet[11] << 24))); + sata_cmd.atapi_scsi_cdb[3] = + cpu_to_le32(((task->ata_task.atapi_packet[12]) | + (task->ata_task.atapi_packet[13] << 8) | + (task->ata_task.atapi_packet[14] << 16) | + (task->ata_task.atapi_packet[15] << 24))); + } + + /* Check for read log for failed drive and return */ + if (sata_cmd.sata_fis.command == 0x2f) { + if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || + (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || + (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { + struct task_status_struct *ts; + + pm8001_ha_dev->id &= 0xDFFFFFFF; + ts = &task->task_status; + + 
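/* READ LOG EXT (0x2f) issued as part of NCQ error recovery is not
 * sent to the controller: the task is completed right here, marked
 * done under task_state_lock and its ccb freed below.
 */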
spin_lock_irqsave(&task->task_state_lock, flags); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & + SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p resp 0x%x " + " stat 0x%x but aborted by upper layer " + "\n", task, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + return 0; + } else if (task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } else if (!task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } + } + } + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, + &sata_cmd, outb++); + + /* rotate the outb queue */ + outb = outb%PM8001_MAX_SPCV_OUTB_NUM; + return ret; +} + +/** + * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND + * @pm8001_ha: our hba card information. + * @phy_id: the phy id which we wanted to start up. + */ +static int +pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) +{ + struct phy_start_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTART; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PHY START REQ for phy_id %d\n", phy_id)); + /* + ** [0:7] PHY Identifier + ** [8:11] link rate 1.5G, 3G, 6G + ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode + ** [14] 0b disable spin up hold; 1b enable spin up hold + ** [15] 0b no change in current PHY analog setup 1b enable using SPAST + */ + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | + LINKRATE_30 | LINKRATE_60 | phy_id); + /* SSC Disable and SAS Analog ST configuration */ + /** + payload.ase_sh_lm_slr_phyid = + cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | + phy_id); + Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need + **/ + + payload.sas_identify.dev_type = SAS_END_DEVICE; + payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; + memcpy(payload.sas_identify.sas_addr, + pm8001_ha->sas_addr, SAS_ADDR_SIZE); + payload.sas_identify.phy_id = phy_id; + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + return ret; +} + +/** + * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND + * @pm8001_ha: our hba card information. + * @phy_id: the phy id which we wanted to stop.
+ */ +static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, + u8 phy_id) +{ + struct phy_stop_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTOP; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + payload.phy_id = cpu_to_le32(phy_id); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + return ret; +} + +/** + * see comments on pm8001_mpi_reg_resp. + */ +static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag) +{ + struct reg_dev_req payload; + u32 opc; + u32 stp_sspsmp_sata = 0x4; + struct inbound_queue_table *circularQ; + u32 linkrate, phy_id; + int rc, tag = 0xdeadbeef; + struct pm8001_ccb_info *ccb; + u8 retryFlag = 0x1; + u16 firstBurstSize = 0; + u16 ITNT = 2000; + struct domain_device *dev = pm8001_dev->sas_device; + struct domain_device *parent_dev = dev->parent; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + ccb = &pm8001_ha->ccb_info[tag]; + ccb->device = pm8001_dev; + ccb->ccb_tag = tag; + payload.tag = cpu_to_le32(tag); + + if (flag == 1) { + stp_sspsmp_sata = 0x02; /*direct attached sata */ + } else { + if (pm8001_dev->dev_type == SAS_SATA_DEV) + stp_sspsmp_sata = 0x00; /* stp*/ + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) + stp_sspsmp_sata = 0x01; /*ssp or smp*/ + } + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) + phy_id = parent_dev->ex_dev.ex_phy->phy_id; + else + phy_id = pm8001_dev->attached_phy; + + opc = OPC_INB_REG_DEV; + + linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? + pm8001_dev->sas_device->linkrate : dev->port->linkrate; + + payload.phyid_portid = + cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) | + ((phy_id & 0xFF) << 8)); + + payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) | + ((linkrate & 0x0F) << 24) | + ((stp_sspsmp_sata & 0x03) << 28)); + payload.firstburstsize_ITNexustimeout = + cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); + + memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, + SAS_ADDR_SIZE); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm80xx_chip_phy_ctl_req - support the local phy operation + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to operate + * @phy_op: + */ +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op) +{ + struct local_phy_ctl_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 opc = OPC_INB_LOCAL_PHY_CONTROL; + memset(&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(1); + payload.phyop_phyid = + cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return ret; +} + +static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; +#ifdef PM8001_USE_MSIX + return 1; +#endif + value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); + if (value) + return 1; + return 0; + +} + +/** + * pm8001_chip_isr - PM8001 isr handler. + * @pm8001_ha: our hba card information. + * @irq: irq number. + * @stat: stat. 
+ */ +static irqreturn_t +pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + pm80xx_chip_interrupt_disable(pm8001_ha, vec); + process_oq(pm8001_ha, vec); + pm80xx_chip_interrupt_enable(pm8001_ha, vec); + return IRQ_HANDLED; +} + +const struct pm8001_dispatch pm8001_80xx_dispatch = { + .name = "pmc80xx", + .chip_init = pm80xx_chip_init, + .chip_soft_rst = pm80xx_chip_soft_rst, + .chip_rst = pm80xx_hw_chip_rst, + .chip_iounmap = pm8001_chip_iounmap, + .isr = pm80xx_chip_isr, + .is_our_interupt = pm80xx_chip_is_our_interupt, + .isr_process_oq = process_oq, + .interrupt_enable = pm80xx_chip_interrupt_enable, + .interrupt_disable = pm80xx_chip_interrupt_disable, + .make_prd = pm8001_chip_make_sg, + .smp_req = pm80xx_chip_smp_req, + .ssp_io_req = pm80xx_chip_ssp_io_req, + .sata_req = pm80xx_chip_sata_req, + .phy_start_req = pm80xx_chip_phy_start_req, + .phy_stop_req = pm80xx_chip_phy_stop_req, + .reg_dev_req = pm80xx_chip_reg_dev_req, + .dereg_dev_req = pm8001_chip_dereg_dev_req, + .phy_ctl_req = pm80xx_chip_phy_ctl_req, + .task_abort = pm8001_chip_abort_task, + .ssp_tm_req = pm8001_chip_ssp_tm_req, + .get_nvmd_req = pm8001_chip_get_nvmd_req, + .set_nvmd_req = pm8001_chip_set_nvmd_req, + .fw_flash_update_req = pm8001_chip_fw_flash_update_req, + .set_dev_state_req = pm8001_chip_set_dev_state_req, +}; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h new file mode 100644 index 000000000000..2b760ba75d7b --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -0,0 +1,1523 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#ifndef _PMC8001_REG_H_ +#define _PMC8001_REG_H_ + +#include <linux/types.h> +#include <scsi/libsas.h> + +/* for Request Opcode of IOMB */ +#define OPC_INB_ECHO 1 /* 0x000 */ +#define OPC_INB_PHYSTART 4 /* 0x004 */ +#define OPC_INB_PHYSTOP 5 /* 0x005 */ +#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */ +#define OPC_INB_SSPINITMSTART 7 /* 0x007 */ +/* 0x8 RESV IN SPCv */ +#define OPC_INB_RSVD 8 /* 0x008 */ +#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */ +#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */ +#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */ +/* 0xC, 0xD, 0xE removed in SPCv */ +#define OPC_INB_SSP_ABORT 15 /* 0x00F */ +#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */ +#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */ +#define OPC_INB_SMP_REQUEST 18 /* 0x012 */ +/* 0x13 SMP_RESPONSE is removed in SPCv */ +#define OPC_INB_SMP_ABORT 20 /* 0x014 */ +/* 0x16 RESV IN SPCv */ +#define OPC_INB_RSVD1 22 /* 0x016 */ +#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */ +#define OPC_INB_SATA_ABORT 24 /* 0x018 */ +#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */ +/* 0x1A RESV IN SPCv */ +#define OPC_INB_RSVD2 26 /* 0x01A */ +#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */ +#define OPC_INB_GPIO 34 /* 0x022 */ +#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */ +#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */ +/* 0x25 RESV IN SPCv */ +#define OPC_INB_RSVD3 37 /* 0x025 */ +#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */ +#define OPC_INB_PORT_CONTROL 39 /* 0x027 */ +#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */ +#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */ +#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */ +#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */ +#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */ +/* 0x2D RESV IN SPCv */ +#define OPC_INB_RSVD4 45 /* 0x02D */ +#define OPC_INB_SGPIO_REGISTER 46 /* 0x02E */ +#define OPC_INB_PCIE_DIAG_EXEC 47 /* 0x02F */ +#define OPC_INB_SET_CONTROLLER_CONFIG 48 /* 0x030 */ +#define OPC_INB_GET_CONTROLLER_CONFIG 49 /* 0x031 */ +#define OPC_INB_REG_DEV 50 /* 0x032 */ +#define OPC_INB_SAS_HW_EVENT_ACK 51 /* 0x033 */ +#define OPC_INB_GET_DEVICE_INFO 52 /* 0x034 */ +#define OPC_INB_GET_PHY_PROFILE 53 /* 0x035 */ +#define OPC_INB_FLASH_OP_EXT 54 /* 0x036 */ +#define OPC_INB_SET_PHY_PROFILE 55 /* 0x037 */ +#define OPC_INB_KEK_MANAGEMENT 256 /* 0x100 */ +#define OPC_INB_DEK_MANAGEMENT 257 /* 0x101 */ +#define OPC_INB_SSP_INI_DIF_ENC_IO 258 /* 0x102 */ +#define OPC_INB_SATA_DIF_ENC_IO 259 /* 0x103 */ + +/* for Response Opcode of IOMB */ +#define OPC_OUB_ECHO 1 /* 0x001 */ +#define OPC_OUB_RSVD 4 /* 0x004 */ +#define OPC_OUB_SSP_COMP 5 /* 0x005 */ +#define OPC_OUB_SMP_COMP 6 /* 0x006 */ +#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */ +#define OPC_OUB_RSVD1 10 /* 0x00A */ +#define OPC_OUB_DEREG_DEV 11 /* 0x00B */ +#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */ +#define OPC_OUB_SATA_COMP 13 /* 0x00D */ +#define OPC_OUB_SATA_EVENT 14 /* 0x00E */ +#define OPC_OUB_SSP_EVENT 15 /* 0x00F */ +#define OPC_OUB_RSVD2 16 /* 0x010 */ +/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/ +#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */ +#define OPC_OUB_RSVD3 19 /* 0x013 */ +#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */ +#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */ +#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */ +#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */ +#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */ +#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */ +#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */ +#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */ +#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */ 
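The OPC_OUB_* list continues below; each value is matched against bits [11:0] of dword 0 of an outbound IOMB by process_one_iomb() in pm80xx_hwi.c (see also the mpi_msg_hdr layout and OPCODE_BITS later in this header). A minimal sketch of that decode step, assuming only the kernel byte-order helpers; the helper name is illustrative and not part of this patch:

static inline u32 spcv_oub_opcode(const void *piomb)
{
	__le32 header = *(const __le32 *)piomb;	/* dword 0 of the IOMB */

	/* the opcode lives in bits [11:0] of the header dword */
	return le32_to_cpu(header) & 0xFFF;
}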
+#define OPC_OUB_RSVD4 31 /* 0x01F */ +#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */ +#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */ +#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */ +#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */ +#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */ +#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */ +#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */ +#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */ +#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */ +#define OPC_OUB_RSVD5 41 /* 0x029 */ +#define OPC_OUB_HW_EVENT 1792 /* 0x700 */ +#define OPC_OUB_DEV_HANDLE_ARRIV 1824 /* 0x720 */ +#define OPC_OUB_THERM_HW_EVENT 1840 /* 0x730 */ +#define OPC_OUB_SGPIO_RESP 2094 /* 0x82E */ +#define OPC_OUB_PCIE_DIAG_EXECUTE 2095 /* 0x82F */ +#define OPC_OUB_DEV_REGIST 2098 /* 0x832 */ +#define OPC_OUB_SAS_HW_EVENT_ACK 2099 /* 0x833 */ +#define OPC_OUB_GET_DEVICE_INFO 2100 /* 0x834 */ +/* spcv specific commands */ +#define OPC_OUB_PHY_START_RESP 2052 /* 0x804 */ +#define OPC_OUB_PHY_STOP_RESP 2053 /* 0x805 */ +#define OPC_OUB_SET_CONTROLLER_CONFIG 2096 /* 0x830 */ +#define OPC_OUB_GET_CONTROLLER_CONFIG 2097 /* 0x831 */ +#define OPC_OUB_GET_PHY_PROFILE 2101 /* 0x835 */ +#define OPC_OUB_FLASH_OP_EXT 2102 /* 0x836 */ +#define OPC_OUB_SET_PHY_PROFILE 2103 /* 0x837 */ +#define OPC_OUB_KEK_MANAGEMENT_RESP 2304 /* 0x900 */ +#define OPC_OUB_DEK_MANAGEMENT_RESP 2305 /* 0x901 */ +#define OPC_OUB_SSP_COALESCED_COMP_RESP 2306 /* 0x902 */ + +/* for phy start*/ +#define SSC_DISABLE_15 (0x01 << 16) +#define SSC_DISABLE_30 (0x02 << 16) +#define SSC_DISABLE_60 (0x04 << 16) +#define SAS_ASE (0x01 << 15) +#define SPINHOLD_DISABLE (0x00 << 14) +#define SPINHOLD_ENABLE (0x01 << 14) +#define LINKMODE_SAS (0x01 << 12) +#define LINKMODE_DSATA (0x02 << 12) +#define LINKMODE_AUTO (0x03 << 12) +#define LINKRATE_15 (0x01 << 8) +#define LINKRATE_30 (0x02 << 8) +#define LINKRATE_60 (0x06 << 8) + +/* Thermal related */ +#define THERMAL_ENABLE 0x1 +#define THERMAL_LOG_ENABLE 0x1 +#define THERMAL_OP_CODE 0x6 +#define LTEMPHIL 70 +#define RTEMPHIL 100 + +/* Encryption info */ +#define SCRATCH_PAD3_ENC_DISABLED 0x00000000 +#define SCRATCH_PAD3_ENC_DIS_ERR 0x00000001 +#define SCRATCH_PAD3_ENC_ENA_ERR 0x00000002 +#define SCRATCH_PAD3_ENC_READY 0x00000003 +#define SCRATCH_PAD3_ENC_MASK SCRATCH_PAD3_ENC_READY + +#define SCRATCH_PAD3_XTS_ENABLED (1 << 14) +#define SCRATCH_PAD3_SMA_ENABLED (1 << 4) +#define SCRATCH_PAD3_SMB_ENABLED (1 << 5) +#define SCRATCH_PAD3_SMF_ENABLED 0 +#define SCRATCH_PAD3_SM_MASK 0x000000F0 +#define SCRATCH_PAD3_ERR_CODE 0x00FF0000 + +#define SEC_MODE_SMF 0x0 +#define SEC_MODE_SMA 0x100 +#define SEC_MODE_SMB 0x200 +#define CIPHER_MODE_ECB 0x00000001 +#define CIPHER_MODE_XTS 0x00000002 +#define KEK_MGMT_SUBOP_KEYCARDUPDATE 0x4 + +/* SAS protocol timer configuration page */ +#define SAS_PROTOCOL_TIMER_CONFIG_PAGE 0x04 +#define STP_MCT_TMO 32 +#define SSP_MCT_TMO 32 +#define SAS_MAX_OPEN_TIME 5 +#define SMP_MAX_CONN_TIMER 0xFF +#define STP_FRM_TIMER 0 +#define STP_IDLE_TIME 5 /* 5 us; controller default */ +#define SAS_MFD 0 +#define SAS_OPNRJT_RTRY_INTVL 2 +#define SAS_DOPNRJT_RTRY_TMO 128 +#define SAS_COPNRJT_RTRY_TMO 128 + +/* + Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second. 
+ Assuming a bigger value of 3 seconds, 3000000/128 = 23437.5 where 128 + is DOPNRJT_RTRY_TMO */ +#define SAS_DOPNRJT_RTRY_THR 23438 +#define SAS_COPNRJT_RTRY_THR 23438 +#define SAS_MAX_AIP 0x200000 +#define IT_NEXUS_TIMEOUT 0x7D0 +#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30) + +struct mpi_msg_hdr { + __le32 header; /* Bits [11:0] - Message operation code */ + /* Bits [15:12] - Message Category */ + /* Bits [21:16] - Outbound queue ID for the + operation completion message */ + /* Bits [23:22] - Reserved */ + /* Bits [28:24] - Buffer Count, indicates how + many buffers are allocated for the message */ + /* Bits [30:29] - Reserved */ + /* Bits [31] - Message Valid bit */ +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to enable the phy (128 bytes) + */ +struct phy_start_req { + __le32 tag; + __le32 ase_sh_lm_slr_phyid; + struct sas_identify_frame sas_identify; /* 28 Bytes */ + __le32 spasti; + u32 reserved[21]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Stop Command + * use to disable the phy (128 bytes) + */ +struct phy_stop_req { + __le32 tag; + __le32 phy_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* set device bits fis - device to host */ +struct set_dev_bits_fis { + u8 fis_type; /* 0xA1*/ + u8 n_i_pmport; + /* b7 : n Bit. Notification bit. If set device needs attention. */ + /* b6 : i Bit. Interrupt Bit */ + /* b5-b4: reserved2 */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u32 _r_a; +} __attribute__ ((packed)); +/* PIO setup FIS - device to host */ +struct pio_setup_fis { + u8 fis_type; /* 0x5f */ + u8 i_d_pmPort; + /* b7 : reserved */ + /* b6 : i bit. Interrupt bit */ + /* b5 : d bit. data transfer direction.
set to 1 for device to host + xfer */ + /* b4 : reserved */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + u8 sector_count; + u8 sector_count_exp; + u8 _r_b; + u8 e_status; + u8 _r_c[2]; + u8 transfer_count; +} __attribute__ ((packed)); + +/* + * brief the data structure of SATA Completion Response + * use to describe the sata task response (64 bytes) + */ +struct sata_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u32 sata_resp[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SAS HW Event Notification + * use to alert the host about the hardware event(64 bytes) + */ +/* updated outbound struct for spcv */ + +struct hw_event_resp { + __le32 lr_status_evt_portid; + __le32 evt_param; + __le32 phyid_npip_portstate; + struct sas_identify_frame sas_identify; + struct dev_to_host_fis sata_fis; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure for thermal event notification + */ + +struct thermal_hw_event { + __le32 thermal_event; + __le32 rht_lht; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of REGISTER DEVICE Command + * use to describe MPI REGISTER DEVICE Command (64 bytes) + */ + +struct reg_dev_req { + __le32 tag; + __le32 phyid_portid; + __le32 dtype_dlr_mcn_ir_retry; + __le32 firstburstsize_ITNexustimeout; + u8 sas_addr[SAS_ADDR_SIZE]; + __le32 upper_device_id; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEREGISTER DEVICE Command + * use to request spc to remove all internal resources associated + * with the device id (64 bytes) + */ + +struct dereg_dev_req { + __le32 tag; + __le32 device_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEVICE_REGISTRATION Response + * use to notify the completion of the device registration (64 bytes) + */ +struct dev_reg_resp { + __le32 tag; + __le32 status; + __le32 device_id; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of Local PHY Control Command + * use to issue PHY CONTROL to local phy (64 bytes) + */ +struct local_phy_ctl_req { + __le32 tag; + __le32 phyop_phyid; + u32 reserved1[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Local Phy Control Response + * use to describe MPI Local Phy Control Response (64 bytes) + */ + struct local_phy_ctl_resp { + __le32 tag; + __le32 phyop_phyid; + __le32 status; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +#define OP_BITS 0x0000FF00 +#define ID_BITS 0x000000FF + +/* + * brief the data structure of PORT Control Command + * use to control port properties (64 bytes) + */ + +struct port_ctl_req { + __le32 tag; + __le32 portop_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of HW Event Ack Command + * use to acknowledge receive HW event (64 bytes) + */ +struct hw_event_ack_req { + __le32 tag; + __le32 phyid_sea_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY_START Response Command + * indicates the completion of PHY_START command (64 bytes) + */ +struct phy_start_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of 
PHY_STOP Response Command + * indicates the completion of PHY_STOP command (64 bytes) + */ +struct phy_stop_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP Completion Response + * use to indicate a SSP Completion (n bytes) + */ +struct ssp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 ssptag_rescv_rescpad; + struct ssp_response_iu ssp_resp_iu; + __le32 residual_count; +} __attribute__((packed, aligned(4))); + +#define SSP_RESCV_BIT 0x00010000 + +/* + * brief the data structure of SATA EVENT response + * use to indicate a SATA Completion (64 bytes) + */ +struct sata_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved; + __le32 event_param0; + __le32 event_param1; + __le32 sata_addr_h32; + __le32 sata_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP EVENT response + * use to indicate a SSP Completion (64 bytes) + */ +struct ssp_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + __le32 ssp_tag; + __le32 event_param0; + __le32 event_param1; + __le32 sas_addr_h32; + __le32 sas_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of General Event Notification Response + * use to describe MPI General Event Notification Response (64 bytes) + */ +struct general_event_resp { + __le32 status; + __le32 inb_IOMB_payload[14]; +} __attribute__((packed, aligned(4))); + +#define GENERAL_EVENT_PAYLOAD 14 +#define OPCODE_BITS 0x00000fff + +/* + * brief the data structure of SMP Request Command + * use to describe MPI SMP REQUEST Command (64 bytes) + */ +struct smp_req { + __le32 tag; + __le32 device_id; + __le32 len_ip_ir; + /* Bits [0] - Indirect response */ + /* Bits [1] - Indirect Payload */ + /* Bits [15:2] - Reserved */ + /* Bits [23:16] - direct payload Len */ + /* Bits [31:24] - Reserved */ + u8 smp_req16[16]; + union { + u8 smp_req[32]; + struct { + __le64 long_req_addr;/* sg dma address, LE */ + __le32 long_req_size;/* LE */ + u32 _r_a; + __le64 long_resp_addr;/* sg dma address, LE */ + __le32 long_resp_size;/* LE */ + u32 _r_b; + } long_smp_req;/* sequencer extension */ + }; + __le32 rsvd[16]; +} __attribute__((packed, aligned(4))); +/* + * brief the data structure of SMP Completion Response + * use to describe MPI SMP Completion Response (64 bytes) + */ +struct smp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u8 _r_a[252]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP SMP SATA Abort Command + * use to describe MPI SSP SMP & SATA Abort Command (64 bytes) + */ +struct task_abort_req { + __le32 tag; + __le32 device_id; + __le32 tag_to_abort; + __le32 abort_all; + u32 reserved[27]; +} __attribute__((packed, aligned(4))); + +/* These flags are used for SSP SMP & SATA Abort */ +#define ABORT_MASK 0x3 +#define ABORT_SINGLE 0x0 +#define ABORT_ALL 0x1 + +/** + * brief the data structure of SSP SATA SMP Abort Response + * use to describe SSP SMP & SATA Abort Response (64 bytes) + */
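The ABORT_* flags above choose between aborting one outstanding tag and flushing a whole device. A hedged sketch of filling struct task_abort_req for the single-tag case, before the response layout (struct task_abort_resp) that follows next; the helper name and the string.h usage are illustrative, not part of this patch:

#include <linux/string.h>

/* Illustrative only: build a single-tag SSP/SMP/SATA abort request.
 * For a device-wide abort, set abort_all to ABORT_ALL and leave
 * tag_to_abort zero instead.
 */
static void spcv_fill_task_abort(struct task_abort_req *req, u32 cmd_tag,
				 u32 device_id, u32 tag_to_abort)
{
	memset(req, 0, sizeof(*req));
	req->tag = cpu_to_le32(cmd_tag);
	req->device_id = cpu_to_le32(device_id);
	req->tag_to_abort = cpu_to_le32(tag_to_abort);
	req->abort_all = cpu_to_le32(ABORT_SINGLE);
}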
+struct task_abort_resp { + __le32 tag; + __le32 status; + __le32 scp; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Command + * use to describe MPI SAS Diagnostic Start/End Command (64 bytes) + */ +struct sas_diag_start_end_req { + __le32 tag; + __le32 operation_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Command + * use to describe MPI SAS Diagnostic Execute Command (64 bytes) + */ +struct sas_diag_execute_req { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 pat1_pat2; + __le32 threshold; + __le32 codepat_errmsk; + __le32 pmon; + __le32 pERF1CTL; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +#define SAS_DIAG_PARAM_BYTES 24 + +/* + * brief the data structure of Set Device State Command + * use to describe MPI Set Device State Command (64 bytes) + */ +struct set_dev_state_req { + __le32 tag; + __le32 device_id; + __le32 nds; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SATA Start Command + * use to describe MPI SATA IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ + +struct sata_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 ncqtag_atap_dir_m_dad; + struct host_to_dev_fis sata_fis; + u32 reserved1; + u32 reserved2; /* dword 11. rsvd for normal I/O. */ + /* EPLE Descl for enc I/O */ + u32 addr_low; /* dword 12. rsvd for enc I/O */ + u32 addr_high; /* dword 13. reserved for enc I/O */ + __le32 len; /* dword 14: length for normal I/O. */ + /* EPLE Desch for enc I/O */ + __le32 esgl; /* dword 15. rsvd for enc I/O */ + __le32 atapi_scsi_cdb[4]; /* dword 16-19. rsvd for enc I/O */ + /* The below fields are reserved for normal I/O */ + __le32 key_index_mode; /* dword 20 */ + __le32 sector_cnt_enss;/* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28. Encryption SGL address high */ + __le32 enc_addr_high; /* dword 29. Encryption SGL address low */ + __le32 enc_len; /* dword 30. Encryption length */ + __le32 enc_esgl; /* dword 31. Encryption esgl bit */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI TM Start Command + * use to describe MPI SSP INI TM Start Command (64 bytes) + */ +struct ssp_ini_tm_start_req { + __le32 tag; + __le32 device_id; + __le32 relate_tag; + __le32 tmf; + u8 lun[8]; + __le32 ds_ads_m; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +struct ssp_info_unit { + u8 lun[8];/* SCSI Logical Unit Number */ + u8 reserved1;/* reserved */ + u8 efb_prio_attr; + /* B7 : enabledFirstBurst */ + /* B6-3 : taskPriority */ + /* B2-0 : taskAttribute */ + u8 reserved2; /* reserved */ + u8 additional_cdb_len; + /* B7-2 : additional_cdb_len */ + /* B1-0 : reserved */ + u8 cdb[16];/* The SCSI CDB up to 16 bytes length */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI IO Start Command + * use to describe MPI SSP INI IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ +struct ssp_ini_io_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dad_dir_m_tlr; + struct ssp_info_unit ssp_iu; + __le32 addr_low; /* dword 12: sgl low for normal I/O. 
*/ + /* epl_descl for encryption I/O */ + __le32 addr_high; /* dword 13: sgl hi for normal I/O */ + /* dpl_descl for encryption I/O */ + __le32 len; /* dword 14: len for normal I/O. */ + /* edpl_desch for encryption I/O */ + __le32 esgl; /* dword 15: ESGL bit for normal I/O. */ + /* user defined tag mask for enc I/O */ + /* The below fields are reserved for normal I/O */ + u8 udt[12]; /* dword 16-18 */ + __le32 sectcnt_ios; /* dword 19 */ + __le32 key_cmode; /* dword 20 */ + __le32 ks_enss; /* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28: Encryption sgl addr low */ + __le32 enc_addr_high; /* dword 29: Encryption sgl addr hi */ + __le32 enc_len; /* dword 30: Encryption length */ + __le32 enc_esgl; /* dword 31: ESGL bit for encryption */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND + * use to initiate SSP I/O operation with optional DIF/ENC + */ +struct ssp_dif_enc_io_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dirMTlr; + __le32 sspiu0; + __le32 sspiu1; + __le32 sspiu2; + __le32 sspiu3; + __le32 sspiu4; + __le32 sspiu5; + __le32 sspiu6; + __le32 epl_des; + __le32 dpl_desl_ndplr; + __le32 dpl_desh; + __le32 uum_uuv_bss_difbits; + u8 udt[12]; + __le32 sectcnt_ios; + __le32 key_cmode; + __le32 ks_enss; + __le32 keytagl; + __le32 keytagh; + __le32 twk_val0; + __le32 twk_val1; + __le32 twk_val2; + __le32 twk_val3; + __le32 addr_low; + __le32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Firmware download + * use to describe MPI FW DOWNLOAD Command (64 bytes) + */ +struct fw_flash_Update_req { + __le32 tag; + __le32 cur_image_offset; + __le32 cur_image_len; + __le32 total_image_len; + u32 reserved0[7]; + __le32 sgl_addr_lo; + __le32 sgl_addr_hi; + __le32 len; + __le32 ext_reserved; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define FWFLASH_IOMB_RESERVED_LEN 0x07 +/** + * brief the data structure of FW_FLASH_UPDATE Response + * use to describe MPI FW_FLASH_UPDATE Response (64 bytes) + * + */ + struct fw_flash_Update_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Get NVM Data Command + * use to get data from NVM in HBA(64 bytes) + */ +struct get_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +struct set_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_req { + __le32 tag; + __le32 cfg_pg[14]; + u32 reserved[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET CONTROLLER CONFIG COMMAND + * use to get controller configuration page + */ +struct get_ctrl_cfg_req { + __le32 tag; + __le32 pgcd; + __le32 int_vec; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for KEK_MANAGEMENT COMMAND + * 
use for KEK management + */ +struct kek_mgmt_req { + __le32 tag; + __le32 new_curidx_ksop; + u32 reserved; + __le32 kblob[12]; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for DEK_MANAGEMENT COMMAND + * use for DEK management + */ +struct dek_mgmt_req { + __le32 tag; + __le32 kidx_dsop; + __le32 dekidx; + __le32 addr_l; + __le32 addr_h; + __le32 nent; + __le32 dbf_tblsize; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET PHY PROFILE COMMAND + * use to set phy specific information + */ +struct set_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET PHY PROFILE COMMAND + * use to retrieve phy specific information + */ +struct get_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + __le32 profile[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for EXT FLASH PARTITION + * use to manage ext flash partition + */ +struct ext_flash_partition_req { + __le32 tag; + __le32 cmd; + __le32 offset; + __le32 len; + u32 reserved[7]; + __le32 addr_low; + __le32 addr_high; + __le32 len1; + __le32 ext; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define TWI_DEVICE 0x0 +#define C_SEEPROM 0x1 +#define VPD_FLASH 0x4 +#define AAP1_RDUMP 0x5 +#define IOP_RDUMP 0x6 +#define EXPAN_ROM 0x7 + +#define IPMode 0x80000000 +#define NVMD_TYPE 0x0000000F +#define NVMD_STAT 0x0000FFFF +#define NVMD_LEN 0xFF000000 +/** + * brief the data structure of Get NVMD Data Response + * use to describe MPI Get NVMD Data Response (64 bytes) + */ +struct get_nvm_data_resp { + __le32 tag; + __le32 ir_tda_bn_dps_das_nvm; + __le32 dlen_status; + __le32 nvm_data[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Response + * use to describe MPI SAS Diagnostic Start/End Response (64 bytes) + * + */ +struct sas_diag_start_end_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Response + * use to describe MPI SAS Diagnostic Execute Response (64 bytes) + * + */ +struct sas_diag_execute_resp { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 Status; + __le32 ReportData; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Set Device State Response + * use to describe MPI Set Device State Response (64 bytes) + * + */ +struct set_dev_state_resp { + __le32 tag; + __le32 status; + __le32 device_id; + __le32 pds_nds; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - begins */ +/** + * brief the data structure for SET CONTROLLER CONFIG RESPONSE + * use to report the result of modifying controller configuration + */ +struct set_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr_pgcd; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +struct get_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr; + __le32 confg_page[12]; +} __attribute__((packed, aligned(4))); + +struct kek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kidx_new_curr_ksop; + __le32 err_qlfr; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +struct dek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kekidx_tbls_dsop; + __le32 dekidx; + __le32 err_qlfr; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct
get_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct flash_op_ext_resp { + __le32 tag; + __le32 cmd; + __le32 status; + __le32 epart_size; + __le32 epart_sect_size; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct set_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct ssp_coalesced_comp_resp { + __le32 coal_cnt; + __le32 tag0; + __le32 ssp_tag0; + __le32 tag1; + __le32 ssp_tag1; + __le32 add_tag_ssp_tag[10]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - ends */ + +/* brief data structure for SAS protocol timer configuration page. + * + */ +struct SASProtocolTimerConfig { + __le32 pageCode; /* 0 */ + __le32 MST_MSI; /* 1 */ + __le32 STP_SSP_MCT_TMO; /* 2 */ + __le32 STP_FRM_TMO; /* 3 */ + __le32 STP_IDLE_TMO; /* 4 */ + __le32 OPNRJT_RTRY_INTVL; /* 5 */ + __le32 Data_Cmd_OPNRJT_RTRY_TMO; /* 6 */ + __le32 Data_Cmd_OPNRJT_RTRY_THR; /* 7 */ + __le32 MAX_AIP; /* 8 */ +} __attribute__((packed, aligned(4))); + +typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; + +#define NDS_BITS 0x0F +#define PDS_BITS 0xF0 + +/* + * HW Events type + */ + +#define HW_EVENT_RESET_START 0x01 +#define HW_EVENT_CHIP_RESET_COMPLETE 0x02 +#define HW_EVENT_PHY_STOP_STATUS 0x03 +#define HW_EVENT_SAS_PHY_UP 0x04 +#define HW_EVENT_SATA_PHY_UP 0x05 +#define HW_EVENT_SATA_SPINUP_HOLD 0x06 +#define HW_EVENT_PHY_DOWN 0x07 +#define HW_EVENT_PORT_INVALID 0x08 +#define HW_EVENT_BROADCAST_CHANGE 0x09 +#define HW_EVENT_PHY_ERROR 0x0A +#define HW_EVENT_BROADCAST_SES 0x0B +#define HW_EVENT_INBOUND_CRC_ERROR 0x0C +#define HW_EVENT_HARD_RESET_RECEIVED 0x0D +#define HW_EVENT_MALFUNCTION 0x0E +#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F +#define HW_EVENT_BROADCAST_EXP 0x10 +#define HW_EVENT_PHY_START_STATUS 0x11 +#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 +#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 +#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 +#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 +#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 +#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 +#define HW_EVENT_PORT_RECOVER 0x18 +#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 +#define HW_EVENT_PORT_RESET_COMPLETE 0x20 +#define EVENT_BROADCAST_ASYNCH_EVENT 0x21 + +/* port state */ +#define PORT_NOT_ESTABLISHED 0x00 +#define PORT_VALID 0x01 +#define PORT_LOSTCOMM 0x02 +#define PORT_IN_RESET 0x04 +#define PORT_3RD_PARTY_RESET 0x07 +#define PORT_INVALID 0x08 + +/* + * SSP/SMP/SATA IO Completion Status values + */ + +#define IO_SUCCESS 0x00 +#define IO_ABORTED 0x01 +#define IO_OVERFLOW 0x02 +#define IO_UNDERFLOW 0x03 +#define IO_FAILED 0x04 +#define IO_ABORT_RESET 0x05 +#define IO_NOT_VALID 0x06 +#define IO_NO_DEVICE 0x07 +#define IO_ILLEGAL_PARAMETER 0x08 +#define IO_LINK_FAILURE 0x09 +#define IO_PROG_ERROR 0x0A + +#define IO_EDC_IN_ERROR 0x0B +#define IO_EDC_OUT_ERROR 0x0C +#define IO_ERROR_HW_TIMEOUT 0x0D +#define IO_XFER_ERROR_BREAK 0x0E +#define IO_XFER_ERROR_PHY_NOT_READY 0x0F +#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 +#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 +#define IO_OPEN_CNX_ERROR_BREAK 0x12 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 +#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 +#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 +#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 +#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 +/* This 
error code 0x18 is not used on SPCv */ +#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 +#define IO_XFER_ERROR_NAK_RECEIVED 0x19 +#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A +#define IO_XFER_ERROR_PEER_ABORTED 0x1B +#define IO_XFER_ERROR_RX_FRAME 0x1C +#define IO_XFER_ERROR_DMA 0x1D +#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E +#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F +#define IO_XFER_ERROR_SATA 0x20 + +/* This error code 0x22 is not used on SPCv */ +#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 +#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 +#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 +#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 +/* This error code 0x25 is not used on SPCv */ +#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 +#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 +#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 +#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 +#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 + +/* The following error codes 0x31 and 0x32 are not used (obsolete) */ +#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 +#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 + +#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 +#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 +#define IO_XFER_CMD_FRAME_ISSUED 0x36 +#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 +#define IO_PORT_IN_RESET 0x38 +#define IO_DS_NON_OPERATIONAL 0x39 +#define IO_DS_IN_RECOVERY 0x3A +#define IO_TM_TAG_NOT_FOUND 0x3B +#define IO_XFER_PIO_SETUP_ERROR 0x3C +#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D +#define IO_DS_IN_ERROR 0x3E +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F +#define IO_ABORT_IN_PROGRESS 0x40 +#define IO_ABORT_DELAYED 0x41 +#define IO_INVALID_LENGTH 0x42 + +/********** additional response event values *****************/ + +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT 0x43 +#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED 0x44 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO 0x45 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST 0x46 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48 +#define IO_DS_INVALID 0x49 +/* WARNING: the value is not contiguous from here */ +#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52 +#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53 +#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54 +#define MPI_IO_RQE_BUSY_FULL 0x55 +#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56 +#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME 0x57 +#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58 + +#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004 +#define MPI_ERR_ATAPI_DEVICE_BUSY 0x1024 + +#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS 0x2040 +/* + * An encryption IO request failed due to DEK Key Tag mismatch. + * The key tag supplied in the encryption IOMB does not match with + * the Key Tag in the referenced DEK Entry. + */ +#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH 0x2041 +#define IO_XFR_ERROR_CIPHER_MODE_INVALID 0x2042 +/* + * An encryption I/O request failed because the initial value (IV) + * in the unwrapped DEK blob didn't match the IV used to unwrap it. + */ +#define IO_XFR_ERROR_DEK_IV_MISMATCH 0x2043 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. */ +#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR 0x2044 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. 
*/ +#define IO_XFR_ERROR_INTERNAL_RAM 0x2045 +/* + * An encryption I/O request failed + * because the DEK index specified in the I/O was outside the bounds of + * the total number of entries in the host DEK table. + */ +#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS 0x2046 + +/* define DIF IO response error status code */ +#define IO_XFR_ERROR_DIF_MISMATCH 0x3000 +#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH 0x3001 +#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH 0x3002 +#define IO_XFR_ERROR_DIF_CRC_MISMATCH 0x3003 + +/* define operator management response status and error qualifier code */ +#define OPR_MGMT_OP_NOT_SUPPORTED 0x2060 +#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL 0x2061 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND 0x2062 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH 0x2063 +#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED 0x2064 +#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL 0x2022 +#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE 0x2023 +/***************** additional response event values ***************/ + +/* WARNING: This error code must always be the last number. + * If you add an error code, modify this code also; + * it is used as an index. + */ +#define IO_ERROR_UNKNOWN_GENERIC 0x2023 + +/* MSGU CONFIGURATION TABLE */ + +#define SPCv_MSGU_CFG_TABLE_UPDATE 0x01 +#define SPCv_MSGU_CFG_TABLE_RESET 0x02 +#define SPCv_MSGU_CFG_TABLE_FREEZE 0x04 +#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08 +#define MSGU_IBDB_SET 0x00 +#define MSGU_HOST_INT_STATUS 0x08 +#define MSGU_HOST_INT_MASK 0x0C +#define MSGU_IOPIB_INT_STATUS 0x18 +#define MSGU_IOPIB_INT_MASK 0x1C +#define MSGU_IBDB_CLEAR 0x20 + +#define MSGU_MSGU_CONTROL 0x24 +#define MSGU_ODR 0x20 +#define MSGU_ODCR 0x28 + +#define MSGU_ODMR 0x30 +#define MSGU_ODMR_U 0x34 +#define MSGU_ODMR_CLR 0x38 +#define MSGU_ODMR_CLR_U 0x3C +#define MSGU_OD_RSVD 0x40 + +#define MSGU_SCRATCH_PAD_0 0x44 +#define MSGU_SCRATCH_PAD_1 0x48 +#define MSGU_SCRATCH_PAD_2 0x4C +#define MSGU_SCRATCH_PAD_3 0x50 +#define MSGU_HOST_SCRATCH_PAD_0 0x54 +#define MSGU_HOST_SCRATCH_PAD_1 0x58 +#define MSGU_HOST_SCRATCH_PAD_2 0x5C +#define MSGU_HOST_SCRATCH_PAD_3 0x60 +#define MSGU_HOST_SCRATCH_PAD_4 0x64 +#define MSGU_HOST_SCRATCH_PAD_5 0x68 +#define MSGU_HOST_SCRATCH_PAD_6 0x6C +#define MSGU_HOST_SCRATCH_PAD_7 0x70 + +/* bit definition for ODMR register */ +#define ODMR_MASK_ALL 0xFFFFFFFF /* mask all + interrupt vectors */ +#define ODMR_CLEAR_ALL 0 /* clear all + interrupt vectors */ +/* bit definition for ODCR register */ +#define ODCR_CLEAR_ALL 0xFFFFFFFF /* clear all + interrupt vectors */ +/* MSIX Interrupts */ +#define MSIX_TABLE_OFFSET 0x2000 +#define MSIX_TABLE_ELEMENT_SIZE 0x10 +#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC +#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + \ + MSIX_INTERRUPT_CONTROL_OFFSET) +#define MSIX_INTERRUPT_DISABLE 0x1 +#define MSIX_INTERRUPT_ENABLE 0x0 + +/* state definition for Scratch Pad1 register */ +#define SCRATCH_PAD_RAAE_READY 0x3 +#define SCRATCH_PAD_ILA_READY 0xC +#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 +#define SCRATCH_PAD_IOP0_READY 0xC00 +#define SCRATCH_PAD_IOP1_READY 0x3000 + +/* boot loader state */ +#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */ +#define SCRATCH_PAD1_BOOTSTATE_SUCESS 0x0 /* Load successful */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM 0x10 /* HDA SEEPROM */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP 0x20 /* HDA BootStrap Pins */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET 0x30 /* HDA Soft Reset */ +#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR 0x40 /* HDA critical error */ +#define 
SCRATCH_PAD1_BOOTSTATE_R1 0x50 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_R2 0x60 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_FATAL 0x70 /* Fatal Error */ + + /* state definition for Scratch Pad2 register */ +#define SCRATCH_PAD2_POR 0x00 /* power on state */ +#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD2_ERR 0x02 /* error state */ +#define SCRATCH_PAD2_RDY 0x03 /* ready state */ +#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW rdy for soft reset flag */ +#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ +#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 + Mask, bit1-0 State */ +#define SCRATCH_PAD2_RESERVED 0x000003FC /* Scratch Pad2 + reserved bits 2 to 9 */ + +#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ +#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ + +/* main configuration offset - byte offset */ +#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */ +#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */ +#define MAIN_FW_REVISION 0x08 /* DWORD 0x02 */ +#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */ +#define MAIN_MAX_SGL_OFFSET 0x10 /* DWORD 0x04 */ +#define MAIN_CNTRL_CAP_OFFSET 0x14 /* DWORD 0x05 */ +#define MAIN_GST_OFFSET 0x18 /* DWORD 0x06 */ +#define MAIN_IBQ_OFFSET 0x1C /* DWORD 0x07 */ +#define MAIN_OBQ_OFFSET 0x20 /* DWORD 0x08 */ +#define MAIN_IQNPPD_HPPD_OFFSET 0x24 /* DWORD 0x09 */ + +/* 0x28 - 0x4C - RSVD */ +#define MAIN_EVENT_CRC_CHECK 0x48 /* DWORD 0x12 */ +#define MAIN_EVENT_LOG_ADDR_HI 0x50 /* DWORD 0x14 */ +#define MAIN_EVENT_LOG_ADDR_LO 0x54 /* DWORD 0x15 */ +#define MAIN_EVENT_LOG_BUFF_SIZE 0x58 /* DWORD 0x16 */ +#define MAIN_EVENT_LOG_OPTION 0x5C /* DWORD 0x17 */ +#define MAIN_PCS_EVENT_LOG_ADDR_HI 0x60 /* DWORD 0x18 */ +#define MAIN_PCS_EVENT_LOG_ADDR_LO 0x64 /* DWORD 0x19 */ +#define MAIN_PCS_EVENT_LOG_BUFF_SIZE 0x68 /* DWORD 0x1A */ +#define MAIN_PCS_EVENT_LOG_OPTION 0x6C /* DWORD 0x1B */ +#define MAIN_FATAL_ERROR_INTERRUPT 0x70 /* DWORD 0x1C */ +#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */ +#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */ +#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */ +#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */ +#define MAIN_GPIO_LED_FLAGS_OFFSET 0x84 /* DWORD 0x21 */ +#define MAIN_ANALOG_SETUP_OFFSET 0x88 /* DWORD 0x22 */ + +#define MAIN_INT_VECTOR_TABLE_OFFSET 0x8C /* DWORD 0x23 */ +#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */ +#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */ +#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */ + +/* General Status Table offset - byte offset */ +#define GST_GSTLEN_MPIS_OFFSET 0x00 +#define GST_IQ_FREEZE_STATE0_OFFSET 0x04 +#define GST_IQ_FREEZE_STATE1_OFFSET 0x08 +#define GST_MSGUTCNT_OFFSET 0x0C +#define GST_IOPTCNT_OFFSET 0x10 +/* 0x14 - 0x34 - RSVD */ +#define GST_GPIO_INPUT_VAL 0x38 +/* 0x3c - 0x40 - RSVD */ +#define GST_RERRINFO_OFFSET0 0x44 +#define GST_RERRINFO_OFFSET1 0x48 +#define GST_RERRINFO_OFFSET2 0x4c +#define GST_RERRINFO_OFFSET3 0x50 +#define GST_RERRINFO_OFFSET4 0x54 +#define GST_RERRINFO_OFFSET5 0x58 +#define GST_RERRINFO_OFFSET6 0x5c +#define GST_RERRINFO_OFFSET7 0x60 + +/* General Status Table - MPI state */ +#define GST_MPI_STATE_UNINIT 0x00 +#define GST_MPI_STATE_INIT 0x01 +#define GST_MPI_STATE_TERMINATION 0x02 +#define GST_MPI_STATE_ERROR 0x03 +#define GST_MPI_STATE_MASK 0x07 + +/* Per SAS PHY Attributes */ + +#define PSPA_PHYSTATE0_OFFSET 0x00 /* Dword V */ +#define PSPA_OB_HW_EVENT_PID0_OFFSET 
0x04 /* DWORD V+1 */ +#define PSPA_PHYSTATE1_OFFSET 0x08 /* Dword V+2 */ +#define PSPA_OB_HW_EVENT_PID1_OFFSET 0x0C /* DWORD V+3 */ +#define PSPA_PHYSTATE2_OFFSET 0x10 /* Dword V+4 */ +#define PSPA_OB_HW_EVENT_PID2_OFFSET 0x14 /* DWORD V+5 */ +#define PSPA_PHYSTATE3_OFFSET 0x18 /* Dword V+6 */ +#define PSPA_OB_HW_EVENT_PID3_OFFSET 0x1C /* DWORD V+7 */ +#define PSPA_PHYSTATE4_OFFSET 0x20 /* Dword V+8 */ +#define PSPA_OB_HW_EVENT_PID4_OFFSET 0x24 /* DWORD V+9 */ +#define PSPA_PHYSTATE5_OFFSET 0x28 /* Dword V+10 */ +#define PSPA_OB_HW_EVENT_PID5_OFFSET 0x2C /* DWORD V+11 */ +#define PSPA_PHYSTATE6_OFFSET 0x30 /* Dword V+12 */ +#define PSPA_OB_HW_EVENT_PID6_OFFSET 0x34 /* DWORD V+13 */ +#define PSPA_PHYSTATE7_OFFSET 0x38 /* Dword V+14 */ +#define PSPA_OB_HW_EVENT_PID7_OFFSET 0x3C /* DWORD V+15 */ +#define PSPA_PHYSTATE8_OFFSET 0x40 /* DWORD V+16 */ +#define PSPA_OB_HW_EVENT_PID8_OFFSET 0x44 /* DWORD V+17 */ +#define PSPA_PHYSTATE9_OFFSET 0x48 /* DWORD V+18 */ +#define PSPA_OB_HW_EVENT_PID9_OFFSET 0x4C /* DWORD V+19 */ +#define PSPA_PHYSTATE10_OFFSET 0x50 /* DWORD V+20 */ +#define PSPA_OB_HW_EVENT_PID10_OFFSET 0x54 /* DWORD V+21 */ +#define PSPA_PHYSTATE11_OFFSET 0x58 /* DWORD V+22 */ +#define PSPA_OB_HW_EVENT_PID11_OFFSET 0x5C /* DWORD V+23 */ +#define PSPA_PHYSTATE12_OFFSET 0x60 /* DWORD V+24 */ +#define PSPA_OB_HW_EVENT_PID12_OFFSET 0x64 /* DWORD V+25 */ +#define PSPA_PHYSTATE13_OFFSET 0x68 /* DWORD V+26 */ +#define PSPA_OB_HW_EVENT_PID13_OFFSET 0x6c /* DWORD V+27 */ +#define PSPA_PHYSTATE14_OFFSET 0x70 /* DWORD V+28 */ +#define PSPA_OB_HW_EVENT_PID14_OFFSET 0x74 /* DWORD V+29 */ +#define PSPA_PHYSTATE15_OFFSET 0x78 /* DWORD V+30 */ +#define PSPA_OB_HW_EVENT_PID15_OFFSET 0x7c /* DWORD V+31 */ +/* end PSPA */ + +/* inbound queue configuration offset - byte offset */ +#define IB_PROPERITY_OFFSET 0x00 +#define IB_BASE_ADDR_HI_OFFSET 0x04 +#define IB_BASE_ADDR_LO_OFFSET 0x08 +#define IB_CI_BASE_ADDR_HI_OFFSET 0x0C +#define IB_CI_BASE_ADDR_LO_OFFSET 0x10 +#define IB_PIPCI_BAR 0x14 +#define IB_PIPCI_BAR_OFFSET 0x18 +#define IB_RESERVED_OFFSET 0x1C + +/* outbound queue configuration offset - byte offset */ +#define OB_PROPERITY_OFFSET 0x00 +#define OB_BASE_ADDR_HI_OFFSET 0x04 +#define OB_BASE_ADDR_LO_OFFSET 0x08 +#define OB_PI_BASE_ADDR_HI_OFFSET 0x0C +#define OB_PI_BASE_ADDR_LO_OFFSET 0x10 +#define OB_CIPCI_BAR 0x14 +#define OB_CIPCI_BAR_OFFSET 0x18 +#define OB_INTERRUPT_COALES_OFFSET 0x1C +#define OB_DYNAMIC_COALES_OFFSET 0x20 +#define OB_PROPERTY_INT_ENABLE 0x40000000 + +#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 +#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 +/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ +#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 +#define PCIE_EVENT_INTERRUPT 0x003044 +#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 +#define PCIE_ERROR_INTERRUPT 0x00304C + +/* SPCV soft reset */ +#define SPC_REG_SOFT_RESET 0x00001000 +#define SPCv_NORMAL_RESET_VALUE 0x1 + +#define SPCv_SOFT_RESET_READ_MASK 0xC0 +#define SPCv_SOFT_RESET_NO_RESET 0x0 +#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED 0x40 +#define SPCv_SOFT_RESET_HDA_MODE_OCCURED 0x80 +#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED 0xC0 + +/* signature definition for host scratch pad0 register */ +#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd +/* Signature for Soft Reset */ + +/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ +#define SPC_REG_RESET 0x000000/* reset register */ + +/* bit definition for SPC_RESET register */ +#define SPC_REG_RESET_OSSP 0x00000001 +#define SPC_REG_RESET_RAAE 0x00000002 +#define 
SPC_REG_RESET_PCS_SPBC 0x00000004 +#define SPC_REG_RESET_PCS_IOP_SS 0x00000008 +#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 +#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 +#define SPC_REG_RESET_PCS_LM 0x00000040 +#define SPC_REG_RESET_PCS 0x00000080 +#define SPC_REG_RESET_GSM 0x00000100 +#define SPC_REG_RESET_DDR2 0x00010000 +#define SPC_REG_RESET_BDMA_CORE 0x00020000 +#define SPC_REG_RESET_BDMA_SXCBI 0x00040000 +#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 +#define SPC_REG_RESET_PCIE_PWR 0x00100000 +#define SPC_REG_RESET_PCIE_SFT 0x00200000 +#define SPC_REG_RESET_PCS_SXCBI 0x00400000 +#define SPC_REG_RESET_LMS_SXCBI 0x00800000 +#define SPC_REG_RESET_PMIC_SXCBI 0x01000000 +#define SPC_REG_RESET_PMIC_CORE 0x02000000 +#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 +#define SPC_REG_RESET_DEVICE 0x80000000 + +/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ +#define SPCV_IBW_AXI_TRANSLATION_LOW 0x001010 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +/* Dynamic map through Bar4 - 0x00700000 */ +#define GSM_CONFIG_RESET 0x00000000 +#define RAM_ECC_DB_ERR 0x00000018 +#define GSM_READ_ADDR_PARITY_INDIC 0x00000058 +#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 +#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 +#define GSM_READ_ADDR_PARITY_CHECK 0x00000038 +#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 +#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 + +#define RB6_ACCESS_REG 0x6A0000 +#define HDAC_EXEC_CMD 0x0002 +#define HDA_C_PA 0xcb +#define HDA_SEQ_ID_BITS 0x00ff0000 +#define HDA_GSM_OFFSET_BITS 0x00FFFFFF +#define HDA_GSM_CMD_OFFSET_BITS 0x42C0 +#define HDA_GSM_RSP_OFFSET_BITS 0x42E0 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +#define SPC_TOP_LEVEL_ADDR_BASE 0x000000 +#define GSM_CONFIG_RESET_VALUE 0x00003b00 +#define GPIO_ADDR_BASE 0x00090000 +#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c + +/* RB6 offset */ +#define SPC_RB6_OFFSET 0x80C0 +/* Magic number of soft reset for RB6 */ +#define RB6_MAGIC_NUMBER_RST 0x1234 + +/* Device Register status */ +#define DEVREG_SUCCESS 0x00 +#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 +#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 +#define DEVREG_FAILURE_INVALID_PHY_ID 0x03 +#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 +#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 +#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 +#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + +#endif diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 317a7fdc3b82..23d607218ae8 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -24,7 +24,9 @@ config SCSI_QLA_FC Firmware images can be retrieved from: - ftp://ftp.qlogic.com/outgoing/linux/firmware/ + http://ldriver.qlogic.com/firmware/ + + They are also included in the linux-firmware tree. 
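The SPCv MSGU scratch-pad definitions that close with the #endif above are what the host polls to decide whether controller firmware finished booting: bits 4-6 of Scratch Pad 1 carry the boot-loader state, and separate bit fields report RAAE, ILA and IOP readiness. As a rough sketch of how those masks compose (illustrative only; the in-tree pm80xx driver has its own register accessors, and the caller here is assumed to have already read MSGU_SCRATCH_PAD_1 over MMIO):

#include <linux/types.h>

/* Sketch: decode MSGU Scratch Pad 1 using the masks defined above. */
static bool spcv_fw_ready(u32 scratch_pad1)
{
	/* Boot-loader state lives in bits 4-6; anything other than a
	 * successful load (0x0) means the firmware is not usable. */
	if ((scratch_pad1 & SCRATCH_PAD1_BOOTSTATE_MASK) !=
			SCRATCH_PAD1_BOOTSTATE_SUCESS)
		return false;

	/* RAAE and ILA readiness are independent bit fields. */
	if ((scratch_pad1 & SCRATCH_PAD_RAAE_READY) != SCRATCH_PAD_RAAE_READY)
		return false;
	if ((scratch_pad1 & SCRATCH_PAD_ILA_READY) != SCRATCH_PAD_ILA_READY)
		return false;

	/* Dual-IOP parts would also check SCRATCH_PAD_IOP1_READY. */
	return (scratch_pad1 & SCRATCH_PAD_IOP0_READY) == SCRATCH_PAD_IOP0_READY;
}

A probe path would typically poll a predicate like this with a timeout before bringing up the inbound and outbound queues.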
config TCM_QLA2XXX tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 729b74389f83..937fed8cb038 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { - lcmd_pkt->cntrl_flags = - __constant_cpu_to_le16(TMF_WRITE_DATA); + lcmd_pkt->cntrl_flags = TMF_WRITE_DATA; vha->qla_stats.output_bytes += scsi_bufflen(cmd); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { - lcmd_pkt->cntrl_flags = - __constant_cpu_to_le16(TMF_READ_DATA); + lcmd_pkt->cntrl_flags = TMF_READ_DATA; vha->qla_stats.input_bytes += scsi_bufflen(cmd); } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 5307bf86d5e0..ad72c1d85111 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr) qla2x00_rel_sp(sp->fcport->vha, sp); } -void +static void qla2x00_sp_compl(void *data, void *ptr, int res) { struct qla_hw_data *ha = (struct qla_hw_data *)data; diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index 14fec976f634..fad71ed067ec 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb, mrb->mbox_cmd = in_mbox[0]; wmb(); + ha->iocb_cnt += mrb->iocb_cnt; ha->isp_ops->queue_iocb(ha); exit_mbox_iocb: spin_unlock_irqrestore(&ha->hardware_lock, flags); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index a47f99957ba8..4d231c12463e 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); - fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf); - fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf); + fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); + fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); fw_ddb_entry->ipv4_tos = conn->ipv4_tos; fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); - fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn); - fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn); + fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); + fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type); fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); @@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) * If this is invoked as a result of a userspace call then the entry is marked * as nonpersistent using flash_state field. 
**/ -int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, - struct dev_db_entry *fw_ddb_entry, - uint16_t *idx, int user) +static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t *idx, int user) { struct iscsi_bus_flash_session *fnode_sess = NULL; struct iscsi_bus_flash_conn *fnode_conn = NULL; @@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, ql4_printk(KERN_ERR, ha, "%s: A non-persistent entry %s found\n", __func__, dev->kobj.name); + put_device(dev); goto exit_ddb_add; } @@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, int parent_type, parent_index = 0xffff; int rc = 0; - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) return -EIO; @@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, rc = sprintf(buf, "\n"); break; case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: - if ((fnode_sess->discovery_parent_idx) >= 0 && - (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)) + if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES) parent_index = fnode_sess->discovery_parent_idx; rc = sprintf(buf, "%u\n", parent_index); @@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, parent_type = ISCSI_DISC_PARENT_ISNS; else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) parent_type = ISCSI_DISC_PARENT_UNKNOWN; - else if (fnode_sess->discovery_parent_type >= 0 && - fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) + else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) parent_type = ISCSI_DISC_PARENT_SENDTGT; else parent_type = ISCSI_DISC_PARENT_UNKNOWN; @@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, rc = -ENOSYS; break; } + + put_device(dev); return rc; } @@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, { struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); - struct dev_db_entry *fw_ddb_entry = NULL; struct iscsi_flashnode_param_info *fnode_param; struct nlattr *attr; int rc = QLA_ERROR; uint32_t rem = len; - fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL); - if (!fw_ddb_entry) { - DEBUG2(ql4_printk(KERN_ERR, ha, - "%s: Unable to allocate ddb buffer\n", - __func__)); - return -ENOMEM; - } - nla_for_each_attr(attr, data, len, rem) { fnode_param = nla_data(attr); @@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; uint16_t *ddb_cookie = NULL; - size_t ddb_size; + size_t ddb_size = 0; void *pddb = NULL; int target_id; int rc = 0; - if (!fnode_sess) { - rc = -EINVAL; - goto exit_ddb_del; - } - if (fnode_sess->is_boot_target) { rc = -EPERM; DEBUG2(ql4_printk(KERN_ERR, ha, @@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) dev_db_start_offset += (fnode_sess->target_id * sizeof(*fw_ddb_entry)); - dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) - - (void *)fw_ddb_entry; + dev_db_start_offset += offsetof(struct dev_db_entry, cookie); ddb_size = sizeof(*ddb_cookie); } diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 83e0fec35d56..fe873cf7570d 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ 
b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.03.00-k8" +#define QLA4XXX_DRIVER_VERSION "5.03.00-k9" diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 5add6f4e7928..0a537a0515ca 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1997,24 +1997,39 @@ out: return ret; } -static unsigned int map_state(sector_t lba, unsigned int *num) +static unsigned long lba_to_map_index(sector_t lba) +{ + if (scsi_debug_unmap_alignment) { + lba += scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; + } + do_div(lba, scsi_debug_unmap_granularity); + + return lba; +} + +static sector_t map_index_to_lba(unsigned long index) { - unsigned int granularity, alignment, mapped; - sector_t block, next, end; + return index * scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; +} - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - block = lba + alignment; - do_div(block, granularity); +static unsigned int map_state(sector_t lba, unsigned int *num) +{ + sector_t end; + unsigned int mapped; + unsigned long index; + unsigned long next; - mapped = test_bit(block, map_storep); + index = lba_to_map_index(lba); + mapped = test_bit(index, map_storep); if (mapped) - next = find_next_zero_bit(map_storep, map_size, block); + next = find_next_zero_bit(map_storep, map_size, index); else - next = find_next_bit(map_storep, map_size, block); + next = find_next_bit(map_storep, map_size, index); - end = next * granularity - scsi_debug_unmap_alignment; + end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); *num = end - lba; return mapped; @@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num) static void map_region(sector_t lba, unsigned int len) { - unsigned int granularity, alignment; sector_t end = lba + len; - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - while (lba < end) { - sector_t block, rem; - - block = lba + alignment; - rem = do_div(block, granularity); + unsigned long index = lba_to_map_index(lba); - if (block < map_size) - set_bit(block, map_storep); + if (index < map_size) + set_bit(index, map_storep); - lba += granularity - rem; + lba = map_index_to_lba(index + 1); } } static void unmap_region(sector_t lba, unsigned int len) { - unsigned int granularity, alignment; sector_t end = lba + len; - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - while (lba < end) { - sector_t block, rem; - - block = lba + alignment; - rem = do_div(block, granularity); + unsigned long index = lba_to_map_index(lba); - if (rem == 0 && lba + granularity < end && block < map_size) { - clear_bit(block, map_storep); - if (scsi_debug_lbprz) + if (lba == map_index_to_lba(index) && + lba + scsi_debug_unmap_granularity <= end && + index < map_size) { + clear_bit(index, map_storep); + if (scsi_debug_lbprz) { memset(fake_storep + - block * scsi_debug_sector_size, 0, - scsi_debug_sector_size); + lba * scsi_debug_sector_size, 0, + scsi_debug_sector_size * + scsi_debug_unmap_granularity); + } } - lba += granularity - rem; + lba = map_index_to_lba(index + 1); } } @@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, write_lock_irqsave(&atomic_rw, iflags); ret = do_device_access(SCpnt, devip, lba, num, 1); - if (scsi_debug_unmap_granularity) + if 
(scsi_debug_lbp()) map_region(lba, num); write_unlock_irqrestore(&atomic_rw, iflags); if (-1 == ret) @@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, write_lock_irqsave(&atomic_rw, iflags); - if (unmap && scsi_debug_unmap_granularity) { + if (unmap && scsi_debug_lbp()) { unmap_region(lba, num); goto out; } @@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, fake_storep + (lba * scsi_debug_sector_size), scsi_debug_sector_size); - if (scsi_debug_unmap_granularity) + if (scsi_debug_lbp()) map_region(lba, num); out: write_unlock_irqrestore(&atomic_rw, iflags); @@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void) /* Logical Block Provisioning */ if (scsi_debug_lbp()) { - unsigned int map_bytes; - scsi_debug_unmap_max_blocks = clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); @@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void) clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); if (scsi_debug_unmap_alignment && - scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { + scsi_debug_unmap_granularity <= + scsi_debug_unmap_alignment) { printk(KERN_ERR - "%s: ERR: unmap_granularity < unmap_alignment\n", + "%s: ERR: unmap_granularity <= unmap_alignment\n", __func__); return -EINVAL; } - map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity); - map_bytes = map_size >> 3; - map_storep = vmalloc(map_bytes); + map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; + map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long)); printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", map_size); @@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void) goto free_vm; } - memset(map_storep, 0x0, map_bytes); + bitmap_zero(map_storep, map_size); /* Map first 1KB for partition table */ if (scsi_debug_num_parts) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index c1b05a83d403..f43de1e56420 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -25,6 +25,7 @@ #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/delay.h> +#include <linux/jiffies.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; DECLARE_COMPLETION_ONSTACK(done); - unsigned long timeleft; + unsigned long timeleft = timeout; struct scsi_eh_save ses; + const unsigned long stall_for = msecs_to_jiffies(100); int rtn; +retry: scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); shost->eh_action = &done; scsi_log_send(scmd); scmd->scsi_done = scsi_eh_done; - shost->hostt->queuecommand(shost, scmd); - - timeleft = wait_for_completion_timeout(&done, timeout); + rtn = shost->hostt->queuecommand(shost, scmd); + if (rtn) { + if (timeleft > stall_for) { + scsi_eh_restore_cmnd(scmd, &ses); + timeleft -= stall_for; + msleep(jiffies_to_msecs(stall_for)); + goto retry; + } + /* signal not to enter either branch of the if () below */ + timeleft = 0; + rtn = NEEDS_RETRY; + } else { + timeleft = wait_for_completion_timeout(&done, timeout); + } shost->eh_action = NULL; - scsi_log_completion(scmd, SUCCESS); + scsi_log_completion(scmd, rtn); SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd: %p, timeleft: %ld\n", __func__, scmd, timeleft)); /* - * If there is time left scsi_eh_done got called, and we will - * examine the actual status codes to see whether the command - * actually did 
complete normally, else tell the host to forget - * about this command. + * If there is time left scsi_eh_done got called, and we will examine + * the actual status codes to see whether the command actually did + * complete normally, else if we have a zero return and no time left, + * the command must still be pending, so abort it and return FAILED. + * If we never actually managed to issue the command, because + * ->queuecommand() kept returning non zero, use the rtn = FAILED + * value above (so don't execute either branch of the if) */ if (timeleft) { rtn = scsi_eh_completed_normally(scmd); @@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, rtn = FAILED; break; } - } else { + } else if (!rtn) { scsi_abort_eh_cmnd(scmd); rtn = FAILED; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index c31187d79343..86d522004a20 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -276,11 +276,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, } EXPORT_SYMBOL(scsi_execute); - -int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, +int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, int retries, - int *resid) + int *resid, int flags) { char *sense = NULL; int result; @@ -291,14 +290,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, return DRIVER_ERROR << 24; } result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, - sense, timeout, retries, 0, resid); + sense, timeout, retries, flags, resid); if (sshdr) scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); kfree(sense); return result; } -EXPORT_SYMBOL(scsi_execute_req); +EXPORT_SYMBOL(scsi_execute_req_flags); /* * Function: scsi_init_cmd_errh() diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 8f6b12cbd224..42539ee2cb11 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -144,33 +144,83 @@ static int scsi_bus_restore(struct device *dev) #ifdef CONFIG_PM_RUNTIME +static int sdev_blk_runtime_suspend(struct scsi_device *sdev, + int (*cb)(struct device *)) +{ + int err; + + err = blk_pre_runtime_suspend(sdev->request_queue); + if (err) + return err; + if (cb) + err = cb(&sdev->sdev_gendev); + blk_post_runtime_suspend(sdev->request_queue, err); + + return err; +} + +static int sdev_runtime_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL; + struct scsi_device *sdev = to_scsi_device(dev); + int err; + + if (sdev->request_queue->dev) + return sdev_blk_runtime_suspend(sdev, cb); + + err = scsi_dev_type_suspend(dev, cb); + if (err == -EAGAIN) + pm_schedule_suspend(dev, jiffies_to_msecs( + round_jiffies_up_relative(HZ/10))); + return err; +} + static int scsi_runtime_suspend(struct device *dev) { int err = 0; - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; dev_dbg(dev, "scsi_runtime_suspend\n"); - if (scsi_is_sdev_device(dev)) { - err = scsi_dev_type_suspend(dev, - pm ? 
pm->runtime_suspend : NULL); - if (err == -EAGAIN) - pm_schedule_suspend(dev, jiffies_to_msecs( - round_jiffies_up_relative(HZ/10))); - } + if (scsi_is_sdev_device(dev)) + err = sdev_runtime_suspend(dev); /* Insert hooks here for targets, hosts, and transport classes */ return err; } -static int scsi_runtime_resume(struct device *dev) +static int sdev_blk_runtime_resume(struct scsi_device *sdev, + int (*cb)(struct device *)) { int err = 0; + + blk_pre_runtime_resume(sdev->request_queue); + if (cb) + err = cb(&sdev->sdev_gendev); + blk_post_runtime_resume(sdev->request_queue, err); + + return err; +} + +static int sdev_runtime_resume(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL; + + if (sdev->request_queue->dev) + return sdev_blk_runtime_resume(sdev, cb); + else + return scsi_dev_type_resume(dev, cb); +} + +static int scsi_runtime_resume(struct device *dev) +{ + int err = 0; dev_dbg(dev, "scsi_runtime_resume\n"); if (scsi_is_sdev_device(dev)) - err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL); + err = sdev_runtime_resume(dev); /* Insert hooks here for targets, hosts, and transport classes */ @@ -185,10 +235,18 @@ static int scsi_runtime_idle(struct device *dev) /* Insert hooks here for targets, hosts, and transport classes */ - if (scsi_is_sdev_device(dev)) - err = pm_schedule_suspend(dev, 100); - else + if (scsi_is_sdev_device(dev)) { + struct scsi_device *sdev = to_scsi_device(dev); + + if (sdev->request_queue->dev) { + pm_runtime_mark_last_busy(dev); + err = pm_runtime_autosuspend(dev); + } else { + err = pm_runtime_suspend(dev); + } + } else { err = pm_runtime_suspend(dev); + } return err; } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 47799a33d6ca..133926b1bb78 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1019,8 +1019,7 @@ exit_match_index: /** * iscsi_get_flashnode_by_index -finds flashnode session entry by index * @shost: pointer to host data - * @data: pointer to data containing value to use for comparison - * @fn: function pointer that does actual comparison + * @idx: index to match * * Finds the flashnode session object for the passed index * @@ -1029,13 +1028,13 @@ exit_match_index: * %NULL on failure */ static struct iscsi_bus_flash_session * -iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data, - int (*fn)(struct device *dev, void *data)) +iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx) { struct iscsi_bus_flash_session *fnode_sess = NULL; struct device *dev; - dev = device_find_child(&shost->shost_gendev, data, fn); + dev = device_find_child(&shost->shost_gendev, &idx, + flashnode_match_index); if (dev) fnode_sess = iscsi_dev_to_flash_session(dev); @@ -1059,18 +1058,13 @@ struct device * iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, int (*fn)(struct device *dev, void *data)) { - struct device *dev; - - dev = device_find_child(&shost->shost_gendev, data, fn); - return dev; + return device_find_child(&shost->shost_gendev, data, fn); } EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); /** * iscsi_find_flashnode_conn - finds flashnode connection entry * @fnode_sess: pointer to parent flashnode session entry - * @data: pointer to data containing value to use for comparison - * @fn: function pointer that does actual comparison * * Finds the flashnode connection object 
for the given + * flashnode session entry @@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); * %NULL on failure */ struct device * -iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess, - void *data, - int (*fn)(struct device *dev, void *data)) +iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess) { - struct device *dev; - - dev = device_find_child(&fnode_sess->dev, data, fn); - return dev; + return device_find_child(&fnode_sess->dev, NULL, + iscsi_is_flashnode_conn_dev); } EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn); @@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->set_flashnode_param) { @@ -2824,25 +2814,27 @@ goto put_host; } - idx = &ev->u.set_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.set_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.set_flashnode.host_no); + __func__, idx, ev->u.set_flashnode.host_no); err = -ENODEV; goto put_host; } - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport, { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->del_flashnode) { @@ -2907,17 +2899,17 @@ goto put_host; } - idx = &ev->u.del_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.del_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.del_flashnode.host_no); + __func__, idx, ev->u.del_flashnode.host_no); err = -ENODEV; goto put_host; } err = transport->del_flashnode(fnode_sess); + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->login_flashnode) { @@ -2949,25 +2941,27 @@ goto put_host; } - idx = &ev->u.login_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.login_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.login_flashnode.host_no); + __func__, idx, ev->u.login_flashnode.host_no); err = -ENODEV; goto put_host; } - dev 
= iscsi_find_flashnode_conn(fnode_sess, - NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->login_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->logout_flashnode) { @@ -2999,26 +2993,28 @@ goto put_host; } - idx = &ev->u.logout_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.logout_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.logout_flashnode.host_no); + __func__, idx, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } - dev = iscsi_find_flashnode_conn(fnode_sess, - NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->logout_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void) } iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh"); - if (!iscsi_eh_timer_workq) + if (!iscsi_eh_timer_workq) { + err = -ENOMEM; goto release_nls; + } return 0; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e6689776b4f6..c1c555242d0d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, char *buffer_data; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; + const char temp[] = "temporary "; int len; if (sdp->type != TYPE_DISK) @@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, * it's not worth the risk */ return -EINVAL; + if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { + buf += sizeof(temp) - 1; + sdkp->cache_override = 1; + } else { + sdkp->cache_override = 0; + } + for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) { len = strlen(sd_cache_types[i]); if (strncmp(sd_cache_types[i], buf, len) == 0 && @@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, return -EINVAL; rcd = ct & 0x01 ? 1 : 0; wce = ct & 0x02 ? 1 : 0; + + if (sdkp->cache_override) { + sdkp->WCE = wce; + sdkp->RCD = rcd; + return count; + } + if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, SD_MAX_RETRIES, &data, NULL)) return -EINVAL; @@ -1121,10 +1136,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode) sdev = sdkp->device; - retval = scsi_autopm_get_device(sdev); - if (retval) - goto error_autopm; - /* * If the device is in error recovery, wait until it is done. * If the device is offline, then disallow any access to it. 
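A note on the "temporary " prefix test in the sd_store_cache_type() hunk above: the sizeof-minus-one idiom for skipping a known prefix is only sound when the prefix is declared as a char array, because sizeof applied to a const char * yields the pointer size, not the string length. A minimal stand-alone illustration (plain userspace C, hypothetical names, not driver code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *as_ptr = "temporary ";  /* sizeof(as_ptr) == sizeof(char *) */
	const char as_arr[] = "temporary "; /* sizeof(as_arr) == 11: 10 chars + NUL */
	const char *buf = "temporary write back";

	printf("ptr: %zu, array: %zu\n", sizeof(as_ptr), sizeof(as_arr));

	/* Only the array form covers the full prefix in a strncmp: */
	if (strncmp(buf, as_arr, sizeof(as_arr) - 1) == 0)
		printf("matched, rest: \"%s\"\n", buf + sizeof(as_arr) - 1);
	return 0;
}

On a 64-bit build the pointer form would compare only 7 bytes ("tempora") and then advance buf by the wrong amount, silently corrupting the cache-type string that follows.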
@@ -1169,8 +1180,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode) return 0; error_out: - scsi_autopm_put_device(sdev); -error_autopm: scsi_disk_put(sdkp); return retval; } @@ -1205,7 +1214,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode) * XXX is followed by a "rmmod sd_mod"? */ - scsi_autopm_put_device(sdev); scsi_disk_put(sdkp); } @@ -1366,14 +1374,9 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) retval = -ENODEV; if (scsi_block_when_processing_errors(sdp)) { - retval = scsi_autopm_get_device(sdp); - if (retval) - goto out; - sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, sshdr); - scsi_autopm_put_device(sdp); } /* failed to execute TUR, assume media not present */ @@ -1423,8 +1426,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp) * Leave the rest of the command zero to indicate * flush everything. */ - res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL); + res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, + &sshdr, SD_FLUSH_TIMEOUT, + SD_MAX_RETRIES, NULL, REQ_PM); if (res == 0) break; } @@ -2318,6 +2322,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) int old_rcd = sdkp->RCD; int old_dpofua = sdkp->DPOFUA; + + if (sdkp->cache_override) + return; + first_len = 4; if (sdp->skip_ms_page_8) { if (sdp->type == TYPE_RBC) @@ -2811,6 +2819,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sdkp->capacity = 0; sdkp->media_present = 1; sdkp->write_prot = 0; + sdkp->cache_override = 0; sdkp->WCE = 0; sdkp->RCD = 0; sdkp->ATO = 0; @@ -2837,6 +2846,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", sdp->removable ? 
"removable " : ""); + blk_pm_runtime_init(sdp->request_queue, dev); scsi_autopm_put_device(sdp); put_device(&sdkp->dev); } @@ -3020,8 +3030,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) if (!scsi_device_online(sdp)) return -ENODEV; - res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_TIMEOUT, SD_MAX_RETRIES, NULL); + res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, + SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM); if (res) { sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n"); sd_print_result(sdkp, res); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 74a1e4ca5401..2386aeb41fe8 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -73,6 +73,7 @@ struct scsi_disk { u8 protection_type;/* Data Integrity Field */ u8 provisioning_mode; unsigned ATO : 1; /* state of disk ATO bit */ + unsigned cache_override : 1; /* temp override of WCE,RCD */ unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 04998f36e507..6174ca4ea275 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn) if (sdt->app_tag == 0xffff) return 0; - /* Bad ref tag received from disk */ - if (sdt->ref_tag == 0xffffffff) { - printk(KERN_ERR - "%s: bad phys ref tag on sector %lu\n", - bix->disk_name, (unsigned long)sector); - return -EIO; - } - if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { printk(KERN_ERR "%s: ref tag error on sector %lu (rcvd %u)\n", diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 0371047c5922..35faf24c6044 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI If you have a controller with this interface, say Y or M here. If unsure, say N. + +config SCSI_UFSHCD_PLATFORM + tristate "Platform bus based UFS Controller support" + depends on SCSI_UFSHCD + ---help--- + This selects the UFS host controller support. Select this if + you have an UFS controller on Platform bus. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 9eda0dfbd6df..1e5bd48457d6 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -1,3 +1,4 @@ # UFSHCD makefile obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o +obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c new file mode 100644 index 000000000000..03319acd9c72 --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -0,0 +1,217 @@ +/* + * Universal Flash Storage Host controller Platform bus based glue driver + * + * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c + * Copyright (C) 2011-2013 Samsung India Software Operations + * + * Authors: + * Santosh Yaraganavi <santosh.sy@samsung.com> + * Vinayak Holikatti <h.vinayak@samsung.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * See the COPYING file in the top-level directory or visit + * <http://www.gnu.org/licenses/gpl-2.0.html> + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This program is provided "AS IS" and "WITH ALL FAULTS" and + * without warranty of any kind. You are solely responsible for + * determining the appropriateness of using and distributing + * the program and assume all risks associated with your exercise + * of rights with respect to the program, including but not limited + * to infringement of third party rights, the risks and costs of + * program errors, damage to or loss of data, programs or equipment, + * and unavailability or interruption of operations. Under no + * circumstances will the contributor of this Program be liable for + * any damages of any kind arising from your use or distribution of + * this program. + */ + +#include "ufshcd.h" +#include <linux/platform_device.h> + +#ifdef CONFIG_PM +/** + * ufshcd_pltfrm_suspend - suspend power management function + * @dev: pointer to device handle + * + * + * Returns 0 + */ +static int ufshcd_pltfrm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ufs_hba *hba = platform_get_drvdata(pdev); + + /* + * TODO: + * 1. Call ufshcd_suspend + * 2. Do bus specific power management + */ + + disable_irq(hba->irq); + + return 0; +} + +/** + * ufshcd_pltfrm_resume - resume power management function + * @dev: pointer to device handle + * + * Returns 0 + */ +static int ufshcd_pltfrm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ufs_hba *hba = platform_get_drvdata(pdev); + + /* + * TODO: + * 1. Call ufshcd_resume. + * 2. 
Do bus specific wake up + */ + + enable_irq(hba->irq); + + return 0; +} +#else +#define ufshcd_pltfrm_suspend NULL +#define ufshcd_pltfrm_resume NULL +#endif + +/** + * ufshcd_pltfrm_probe - probe routine of the driver + * @pdev: pointer to Platform device handle + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_pltfrm_probe(struct platform_device *pdev) +{ + struct ufs_hba *hba; + void __iomem *mmio_base; + struct resource *mem_res; + struct resource *irq_res; + resource_size_t mem_size; + int err; + struct device *dev = &pdev->dev; + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_res) { + dev_err(&pdev->dev, + "Memory resource not available\n"); + err = -ENODEV; + goto out_error; + } + + mem_size = resource_size(mem_res); + if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) { + dev_err(&pdev->dev, + "Cannot reserve the memory resource\n"); + err = -EBUSY; + goto out_error; + } + + mmio_base = ioremap_nocache(mem_res->start, mem_size); + if (!mmio_base) { + dev_err(&pdev->dev, "memory map failed\n"); + err = -ENOMEM; + goto out_release_regions; + } + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq_res) { + dev_err(&pdev->dev, "IRQ resource not available\n"); + err = -ENODEV; + goto out_iounmap; + } + + err = dma_set_coherent_mask(dev, dev->coherent_dma_mask); + if (err) { + dev_err(&pdev->dev, "set dma mask failed\n"); + goto out_iounmap; + } + + err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start); + if (err) { + dev_err(&pdev->dev, "Initialization failed\n"); + goto out_iounmap; + } + + platform_set_drvdata(pdev, hba); + + return 0; + +out_iounmap: + iounmap(mmio_base); +out_release_regions: + release_mem_region(mem_res->start, mem_size); +out_error: + return err; +} + +/** + * ufshcd_pltfrm_remove - remove platform driver routine + * @pdev: pointer to platform device handle + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_pltfrm_remove(struct platform_device *pdev) +{ + struct resource *mem_res; + resource_size_t mem_size; + struct ufs_hba *hba = platform_get_drvdata(pdev); + + disable_irq(hba->irq); + + /* Some buggy controllers raise an interrupt after + * the resources have been removed. 
So we first unregister the + * irq handler and only then release the resources used by the driver. + */ + + free_irq(hba->irq, hba); + ufshcd_remove(hba); + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_res) + dev_err(&pdev->dev, "ufshcd: Memory resource not available\n"); + else { + mem_size = resource_size(mem_res); + release_mem_region(mem_res->start, mem_size); + } + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id ufs_of_match[] = { + { .compatible = "jedec,ufs-1.1"}, + { }, +}; + +static const struct dev_pm_ops ufshcd_dev_pm_ops = { + .suspend = ufshcd_pltfrm_suspend, + .resume = ufshcd_pltfrm_resume, +}; + +static struct platform_driver ufshcd_pltfrm_driver = { + .probe = ufshcd_pltfrm_probe, + .remove = ufshcd_pltfrm_remove, + .driver = { + .name = "ufshcd", + .owner = THIS_MODULE, + .pm = &ufshcd_dev_pm_ops, + .of_match_table = ufs_of_match, + }, +}; + +module_platform_driver(ufshcd_pltfrm_driver); + +MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>"); +MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); +MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(UFSHCD_DRIVER_VERSION); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 60fd40c4e4c2..c32a478df81b 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp) ucd_cmd_ptr->header.dword_2 = 0; ucd_cmd_ptr->exp_data_transfer_len = - cpu_to_be32(lrbp->cmd->transfersize); + cpu_to_be32(lrbp->cmd->sdb.length); memcpy(ucd_cmd_ptr->cdb, lrbp->cmd->cmnd, diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c index 720665ca2bb1..205f1c499c46 100644 --- a/drivers/ssb/driver_chipcommon_sflash.c +++ b/drivers/ssb/driver_chipcommon_sflash.c @@ -16,7 +16,7 @@ struct ssb_sflash_tbl_e { u16 numblocks; }; -static struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = { +static const struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = { { "M25P20", 0x11, 0x10000, 4, }, { "M25P40", 0x12, 0x10000, 8, }, @@ -27,7 +27,7 @@ static struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = { { 0 }, }; -static struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = { +static const struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = { { "SST25WF512", 1, 0x1000, 16, }, { "SST25VF512", 0x48, 0x1000, 16, }, { "SST25WF010", 2, 0x1000, 32, }, @@ -45,7 +45,7 @@ static struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = { { 0 }, }; -static struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = { +static const struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = { { "AT45DB011", 0xc, 256, 512, }, { "AT45DB021", 0x14, 256, 1024, }, { "AT45DB041", 0x1c, 256, 2048, }, @@ -73,7 +73,8 @@ static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode) /* Initialize serial flash access */ int ssb_sflash_init(struct ssb_chipcommon *cc) { - struct ssb_sflash_tbl_e *e; + struct ssb_sflash *sflash = &cc->dev->bus->mipscore.sflash; + const struct ssb_sflash_tbl_e *e; + u32 id, id2; switch (cc->capabilities & SSB_CHIPCO_CAP_FLASHT) { @@ -131,6 +132,12 @@ int ssb_sflash_init(struct ssb_chipcommon *cc) return -ENOTSUPP; } + sflash->window = SSB_FLASH2; + sflash->blocksize = e->blocksize; + sflash->numblocks = e->numblocks; + sflash->size = sflash->blocksize * sflash->numblocks; + sflash->present = true; + pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n", e->name, e->blocksize, e->numblocks); diff --git a/drivers/tty/serial/68328serial.c 
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c index ef2e08e9b590..5dc9c4bfa66e 100644 --- a/drivers/tty/serial/68328serial.c +++ b/drivers/tty/serial/68328serial.c @@ -14,7 +14,6 @@ * 2.4/2.5 port David McCullough */ -#include <asm/dbg.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/serial.h> diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 52a3ecd40421..6fa2ae77fffd 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -30,7 +30,6 @@ #include <linux/serial.h> #include <linux/serial_core.h> -#include <bcm63xx_clk.h> #include <bcm63xx_irq.h> #include <bcm63xx_regs.h> #include <bcm63xx_io.h> diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c index 6953dc82850c..a4fdce74f883 100644 --- a/drivers/tty/tty_audit.c +++ b/drivers/tty/tty_audit.c @@ -60,24 +60,22 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf) tty_audit_buf_free(buf); } -static void tty_audit_log(const char *description, struct task_struct *tsk, - kuid_t loginuid, unsigned sessionid, int major, - int minor, unsigned char *data, size_t size) +static void tty_audit_log(const char *description, int major, int minor, + unsigned char *data, size_t size) { struct audit_buffer *ab; + struct task_struct *tsk = current; + uid_t uid = from_kuid(&init_user_ns, task_uid(tsk)); + uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk)); + u32 sessionid = audit_get_sessionid(tsk); ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); if (ab) { char name[sizeof(tsk->comm)]; - kuid_t uid = task_uid(tsk); - - audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u " - "major=%d minor=%d comm=", description, - tsk->pid, - from_kuid(&init_user_ns, uid), - from_kuid(&init_user_ns, loginuid), - sessionid, - major, minor); + + audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d" - " minor=%d comm=", description, tsk->pid, uid, + loginuid, sessionid, major, minor); get_task_comm(name, tsk); audit_log_untrustedstring(ab, name); audit_log_format(ab, " data="); @@ -90,11 +88,9 @@ static void tty_audit_log(const char *description, struct task_struct *tsk, * tty_audit_buf_push - Push buffered data out * * Generate an audit message from the contents of @buf, which is owned by - * @tsk with @loginuid. @buf->mutex must be locked. + * the current task. @buf->mutex must be locked. */ -static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid, - unsigned int sessionid, - struct tty_audit_buf *buf) +static void tty_audit_buf_push(struct tty_audit_buf *buf) { if (buf->valid == 0) return; @@ -102,25 +98,11 @@ static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid, buf->valid = 0; return; } - tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor, - buf->data, buf->valid); + tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid); buf->valid = 0; } /** - * tty_audit_buf_push_current - Push buffered data out - * - * Generate an audit message from the contents of @buf, which is owned by - * the current task. @buf->mutex must be locked. - */ -static void tty_audit_buf_push_current(struct tty_audit_buf *buf) -{ - kuid_t auid = audit_get_loginuid(current); - unsigned int sessionid = audit_get_sessionid(current); - tty_audit_buf_push(current, auid, sessionid, buf); -} - -/** * tty_audit_exit - Handle a task exit * * Make sure all buffered data is written out and deallocate the buffer.
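The reworked tty_audit_log() above stops taking the audited identity as parameters and derives it from current, which is what every remaining caller supplied anyway. A condensed sketch of that pattern, using the same audit helpers as the hunk; demo_audit_log() is a hypothetical name and the function is a reduced illustration, not the kernel's version:

#include <linux/audit.h>
#include <linux/sched.h>
#include <linux/cred.h>

/* Derive uid, login uid and session id from current instead of
 * threading them through every caller. */
static void demo_audit_log(const char *description, int major, int minor)
{
	struct task_struct *tsk = current;
	uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
	uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
	u32 sessionid = audit_get_sessionid(tsk);
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
	if (!ab)
		return;

	audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d minor=%d",
			 description, tsk->pid, uid, loginuid, sessionid,
			 major, minor);
	audit_log_end(ab);
}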
@@ -130,15 +112,13 @@ void tty_audit_exit(void) { struct tty_audit_buf *buf; - spin_lock_irq(&current->sighand->siglock); buf = current->signal->tty_audit_buf; current->signal->tty_audit_buf = NULL; - spin_unlock_irq(&current->sighand->siglock); if (!buf) return; mutex_lock(&buf->mutex); - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -151,9 +131,8 @@ void tty_audit_exit(void) */ void tty_audit_fork(struct signal_struct *sig) { - spin_lock_irq(&current->sighand->siglock); sig->audit_tty = current->signal->audit_tty; - spin_unlock_irq(&current->sighand->siglock); + sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd; } /** @@ -163,20 +142,21 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch) { struct tty_audit_buf *buf; int major, minor, should_audit; + unsigned long flags; - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); should_audit = current->signal->audit_tty; buf = current->signal->tty_audit_buf; if (buf) atomic_inc(&buf->count); - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); major = tty->driver->major; minor = tty->driver->minor_start + tty->index; if (buf) { mutex_lock(&buf->mutex); if (buf->major == major && buf->minor == minor) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); } @@ -187,24 +167,20 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch) auid = audit_get_loginuid(current); sessionid = audit_get_sessionid(current); - tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major, - minor, &ch, 1); + tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1); } }
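The locking changes running through these hunks all follow one recipe: declare an unsigned long flags, replace spin_lock_irq() with spin_lock_irqsave(), and replace the unlock with spin_unlock_irqrestore(). The irqsave form saves the interrupt state and restores it on unlock, so the function stops assuming its caller left interrupts enabled. A minimal sketch with a hypothetical lock and datum:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_value;

/* spin_lock_irq() unconditionally re-enables interrupts on unlock and is
 * therefore only safe when interrupts are known to be on.  The irqsave
 * variant records the previous state in 'flags' and restores it, making
 * the function callable from any context. */
static int demo_read(void)
{
	unsigned long flags;
	int val;

	spin_lock_irqsave(&demo_lock, flags);
	val = demo_value;
	spin_unlock_irqrestore(&demo_lock, flags);

	return val;
}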
/** - * tty_audit_push_task - Flush task's pending audit data - * @tsk: task pointer - * @loginuid: sender login uid - * @sessionid: sender session id + * tty_audit_push_current - Flush current's pending audit data * - * Called with a ref on @tsk held. Try to lock sighand and get a - * reference to the tty audit buffer if available. + * Try to lock sighand and get a reference to the tty audit buffer if available. * Flush the buffer or return an appropriate error code. */ -int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid) +int tty_audit_push_current(void) { struct tty_audit_buf *buf = ERR_PTR(-EPERM); + struct task_struct *tsk = current; unsigned long flags; if (!lock_task_sighand(tsk, &flags)) @@ -225,7 +201,7 @@ int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid) return PTR_ERR(buf); mutex_lock(&buf->mutex); - tty_audit_buf_push(tsk, loginuid, sessionid, buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -243,10 +219,11 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, unsigned icanon) { struct tty_audit_buf *buf, *buf2; + unsigned long flags; buf = NULL; buf2 = NULL; - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); if (likely(!current->signal->audit_tty)) goto out; buf = current->signal->tty_audit_buf; @@ -254,7 +231,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, atomic_inc(&buf->count); goto out; } - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); buf2 = tty_audit_buf_alloc(tty->driver->major, tty->driver->minor_start + tty->index, @@ -264,7 +241,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, return NULL; } - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); if (!current->signal->audit_tty) goto out; buf = current->signal->tty_audit_buf; @@ -276,7 +253,7 @@ atomic_inc(&buf->count); /* Fall through */ out: - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); if (buf2) tty_audit_buf_free(buf2); return buf; @@ -292,10 +269,18 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, { struct tty_audit_buf *buf; int major, minor; + int audit_log_tty_passwd; + unsigned long flags; if (unlikely(size == 0)) return; + spin_lock_irqsave(&current->sighand->siglock, flags); + audit_log_tty_passwd = current->signal->audit_tty_log_passwd; + spin_unlock_irqrestore(&current->sighand->siglock, flags); + if (!audit_log_tty_passwd && icanon && !L_ECHO(tty)) + return; + if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) return; @@ -309,7 +294,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, minor = tty->driver->minor_start + tty->index; if (buf->major != major || buf->minor != minor || buf->icanon != icanon) { - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); buf->major = major; buf->minor = minor; buf->icanon = icanon; @@ -325,7 +310,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, data += run; size -= run; if (buf->valid == N_TTY_BUF_SIZE) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); } while (size != 0); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf);
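The guard added to tty_audit_add_data() above skips canonical-mode input when echo is off, the shape of a password prompt, unless the audit_tty_log_passwd knob is set. Boiled down to a standalone predicate, with the three parameters standing in for the signal flag, the icanon argument and L_ECHO(tty):

#include <stdbool.h>
#include <stdio.h>

/* Canonical reads with echo disabled look like password entry; skip
 * them unless logging of passwords was explicitly enabled. */
static bool should_audit_input(bool log_passwd, bool icanon, bool echo)
{
	if (!log_passwd && icanon && !echo)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", should_audit_input(false, true, false)); /* 0: skipped */
	printf("%d\n", should_audit_input(true, true, false));  /* 1: logged */
	return 0;
}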
@@ -339,16 +324,17 @@ void tty_audit_push(struct tty_struct *tty) { struct tty_audit_buf *buf; + unsigned long flags; - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); if (likely(!current->signal->audit_tty)) { - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); return; } buf = current->signal->tty_audit_buf; if (buf) atomic_inc(&buf->count); - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); if (buf) { int major, minor; @@ -357,7 +343,7 @@ void tty_audit_push(struct tty_struct *tty) minor = tty->driver->minor_start + tty->index; mutex_lock(&buf->mutex); if (buf->major == major && buf->minor == minor) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); } diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c index ddabaa867b0d..700cac067b46 100644 --- a/drivers/video/au1100fb.c +++ b/drivers/video/au1100fb.c @@ -111,30 +111,16 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) switch (blank_mode) { case VESA_NO_BLANKING: - /* Turn on panel */ - fbdev->regs->lcd_control |= LCD_CONTROL_GO; -#ifdef CONFIG_MIPS_PB1100 - if (fbdev->panel_idx == 1) { - au_writew(au_readw(PB1100_G_CONTROL) - | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD), - PB1100_G_CONTROL); - } -#endif + /* Turn on panel */ + fbdev->regs->lcd_control |= LCD_CONTROL_GO; au_sync(); break; case VESA_VSYNC_SUSPEND: case VESA_HSYNC_SUSPEND: case VESA_POWERDOWN: - /* Turn off panel */ - fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; -#ifdef CONFIG_MIPS_PB1100 - if (fbdev->panel_idx == 1) { - au_writew(au_readw(PB1100_G_CONTROL) - & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD), - PB1100_G_CONTROL); - } -#endif + /* Turn off panel */ + fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; au_sync(); break; default: diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index dd4d9cb86243..f03bf501527f 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -141,7 +141,7 @@ config XEN_GRANT_DEV_ALLOC config SWIOTLB_XEN def_bool y - depends on PCI + depends on PCI && X86 select SWIOTLB config XEN_TMEM diff --git a/drivers/xen/events.c b/drivers/xen/events.c index d8cc8127f19c..6a6bbe4ede92 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -167,6 +167,8 @@ static void xen_irq_info_common_init(struct irq_info *info, info->cpu = cpu; evtchn_to_irq[evtchn] = irq; + + irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); } static void xen_irq_info_evtchn_init(unsigned irq, @@ -874,7 +876,6 @@ int bind_evtchn_to_irq(unsigned int evtchn) struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_EVTCHN); } - irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); out: mutex_unlock(&irq_mapping_update_lock);
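The xen/events hunk moves irq_clear_status_flags() from bind_evtchn_to_irq() into xen_irq_info_common_init(), so every event-channel IRQ type becomes requestable, not only plain event channels. Dynamically allocated IRQ descriptors start out with IRQ_NOREQUEST set, which makes a later request_irq() fail, and IRQ_NOAUTOEN suppresses the automatic unmask when a handler is installed. A sketch of the call as it would sit in a shared init helper; demo_irq_info_init() is a hypothetical name:

#include <linux/irq.h>

/* Clear both flags in the common path so the irq can be requested and
 * is unmasked automatically once a handler is attached. */
static void demo_irq_info_init(unsigned int irq)
{
	irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
}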