Diffstat (limited to 'net')
249 files changed, 11808 insertions, 6022 deletions
diff --git a/net/802/hippi.c b/net/802/hippi.c index 887e73d520e4..1997b7dd265e 100644 --- a/net/802/hippi.c +++ b/net/802/hippi.c @@ -65,7 +65,7 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev, hip->le.src_addr_type = 2; /* 12 bit SC address */ memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3); - memset(&hip->le.reserved, 0, 16); + memset_startat(&hip->le, 0, reserved); hip->snap.dsap = HIPPI_EXTENDED_SAP; hip->snap.ssap = HIPPI_EXTENDED_SAP; diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index abaa5d96ded2..788076b002b3 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -319,8 +319,8 @@ static void vlan_transfer_features(struct net_device *dev, { struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); - vlandev->gso_max_size = dev->gso_max_size; - vlandev->gso_max_segs = dev->gso_max_segs; + netif_set_gso_max_size(vlandev, dev->gso_max_size); + netif_set_gso_max_segs(vlandev, dev->gso_max_segs); if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto)) vlandev->hard_header_len = dev->hard_header_len; diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 59bc13b5f14f..acf8c791f320 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -476,10 +476,9 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head, type = vhdr->h_vlan_encapsulated_proto; - rcu_read_lock(); ptype = gro_find_receive_by_type(type); if (!ptype) - goto out_unlock; + goto out; flush = 0; @@ -501,8 +500,6 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head, ipv6_gro_receive, inet_gro_receive, head, skb); -out_unlock: - rcu_read_unlock(); out: skb_gro_flush_final(skb, pp, flush); @@ -516,14 +513,12 @@ static int vlan_gro_complete(struct sk_buff *skb, int nhoff) struct packet_offload *ptype; int err = -ENOENT; - rcu_read_lock(); ptype = gro_find_complete_by_type(type); if (ptype) err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, ipv6_gro_complete, inet_gro_complete, skb, nhoff + sizeof(*vhdr)); - rcu_read_unlock(); return err; } diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index a54535cbcf4c..26d031a43cc1 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -573,8 +573,8 @@ static int vlan_dev_init(struct net_device *dev) NETIF_F_ALL_FCOE; dev->features |= dev->hw_features | NETIF_F_LLTX; - dev->gso_max_size = real_dev->gso_max_size; - dev->gso_max_segs = real_dev->gso_max_segs; + netif_set_gso_max_size(dev, real_dev->gso_max_size); + netif_set_gso_max_segs(dev, real_dev->gso_max_segs); if (dev->features & NETIF_F_VLAN_FEATURES) netdev_warn(real_dev, "VLAN features are set incorrectly. 
Q-in-Q configurations may not work correctly.\n"); @@ -616,7 +616,7 @@ static int vlan_dev_init(struct net_device *dev) return -ENOMEM; /* Get vlan's reference to real_dev */ - dev_hold(real_dev); + dev_hold_track(real_dev, &vlan->dev_tracker, GFP_KERNEL); return 0; } @@ -848,7 +848,7 @@ static void vlan_dev_free(struct net_device *dev) vlan->vlan_pcpu_stats = NULL; /* Get rid of the vlan's reference to real_dev */ - dev_put(vlan->real_dev); + dev_put_track(vlan->real_dev, &vlan->dev_tracker); } void vlan_setup(struct net_device *dev) diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index ec87dea23719..08bf6c839e25 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -252,7 +252,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset) stats = dev_get_stats(vlandev, &temp); seq_printf(seq, - "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n", + "%s VID: %d REORDER_HDR: %i dev->priv_flags: %llx\n", vlandev->name, vlan->vlan_id, (int)(vlan->flags & 1), vlandev->priv_flags); diff --git a/net/Kconfig b/net/Kconfig index 074472dfa94a..8a1f9d0287de 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -455,4 +455,9 @@ config ETHTOOL_NETLINK netlink. It provides better extensibility and some new features, e.g. notification messages. +config NETDEV_ADDR_LIST_TEST + tristate "Unit tests for device address list" + default KUNIT_ALL_TESTS + depends on KUNIT + endif # if NET diff --git a/net/Kconfig.debug b/net/Kconfig.debug new file mode 100644 index 000000000000..2f50611df858 --- /dev/null +++ b/net/Kconfig.debug @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config NET_DEV_REFCNT_TRACKER + bool "Enable net device refcount tracking" + depends on DEBUG_KERNEL && STACKTRACE_SUPPORT + select REF_TRACKER + default n + help + Enable debugging feature to track device references. + This adds memory and cpu costs. + +config NET_NS_REFCNT_TRACKER + bool "Enable networking namespace refcount tracking" + depends on DEBUG_KERNEL && STACKTRACE_SUPPORT + select REF_TRACKER + default n + help + Enable debugging feature to track netns references. + This adds memory and cpu costs. 
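Many of the dev_hold()/dev_put() call sites in this diff (8021q/vlan, ax25, and others) are converted to dev_hold_track()/dev_put_track(): the holder embeds a netdev_tracker cookie next to its struct net_device pointer so that, when the new NET_DEV_REFCNT_TRACKER option added above is enabled, each individual reference can be traced back to the code that took it. A minimal sketch of that pattern, using a hypothetical foo_port holder (the foo_* names are illustrative and not taken from this diff):

#include <linux/netdevice.h>

/* Hypothetical holder of a long-lived net_device reference. */
struct foo_port {
	struct net_device *dev;
	netdev_tracker dev_tracker;	/* tracking cookie for the reference below */
};

static void foo_port_attach(struct foo_port *port, struct net_device *dev)
{
	port->dev = dev;
	/* Takes a reference; with CONFIG_NET_DEV_REFCNT_TRACKER=y it also
	 * records this particular reference (including a stack trace) so
	 * leaks can be attributed to their owner.
	 */
	dev_hold_track(dev, &port->dev_tracker, GFP_KERNEL);
}

static void foo_port_detach(struct foo_port *port)
{
	/* Drops the reference and releases the matching tracker entry,
	 * so leaked or double-put references can be reported.
	 */
	dev_put_track(port->dev, &port->dev_tracker);
	port->dev = NULL;
}

The 8021q and ax25 hunks in this diff follow exactly this shape, storing the tracker in vlan_dev_priv and ax25_dev respectively (ax25 passes GFP_ATOMIC because it holds a spinlock at that point).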
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index d0a043a51848..256fadb94df3 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -58,7 +58,7 @@ void ax25_dev_device_up(struct net_device *dev) dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; - dev_hold(dev); + dev_hold_track(dev, &ax25_dev->dev_tracker, GFP_ATOMIC); ax25_dev->forward = NULL; ax25_dev->values[AX25_VALUES_IPDEFMODE] = AX25_DEF_IPDEFMODE; @@ -114,7 +114,7 @@ void ax25_dev_device_down(struct net_device *dev) ax25_dev_list = s->next; spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; - dev_put(dev); + dev_put_track(dev, &ax25_dev->dev_tracker); kfree(ax25_dev); return; } @@ -124,7 +124,7 @@ void ax25_dev_device_down(struct net_device *dev) s->next = ax25_dev->next; spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; - dev_put(dev); + dev_put_track(dev, &ax25_dev->dev_tracker); kfree(ax25_dev); return; } @@ -188,7 +188,7 @@ void __exit ax25_dev_free(void) ax25_dev = ax25_dev_list; while (ax25_dev != NULL) { s = ax25_dev; - dev_put(ax25_dev->dev); + dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker); ax25_dev = ax25_dev->next; kfree(s); } diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 291770fc9551..a52bba8500e1 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -15,7 +15,7 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \ - eir.o + eir.o hci_sync.o bluetooth-$(CONFIG_BT_BREDR) += sco.o bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o diff --git a/net/bluetooth/aosp.c b/net/bluetooth/aosp.c index a1b7762335a5..432ae3aac9e3 100644 --- a/net/bluetooth/aosp.c +++ b/net/bluetooth/aosp.c @@ -8,9 +8,43 @@ #include "aosp.h" +/* Command complete parameters of LE_Get_Vendor_Capabilities_Command + * The parameters grow over time. The base version that declares the + * version_supported field is v0.95. 
Refer to + * https://cs.android.com/android/platform/superproject/+/master:system/ + * bt/gd/hci/controller.cc;l=452?q=le_get_vendor_capabilities_handler + */ +struct aosp_rp_le_get_vendor_capa { + /* v0.95: 15 octets */ + __u8 status; + __u8 max_advt_instances; + __u8 offloaded_resolution_of_private_address; + __le16 total_scan_results_storage; + __u8 max_irk_list_sz; + __u8 filtering_support; + __u8 max_filter; + __u8 activity_energy_info_support; + __le16 version_supported; + __le16 total_num_of_advt_tracked; + __u8 extended_scan_support; + __u8 debug_logging_supported; + /* v0.96: 16 octets */ + __u8 le_address_generation_offloading_support; + /* v0.98: 21 octets */ + __le32 a2dp_source_offload_capability_mask; + __u8 bluetooth_quality_report_support; + /* v1.00: 25 octets */ + __le32 dynamic_audio_buffer_support; +} __packed; + +#define VENDOR_CAPA_BASE_SIZE 15 +#define VENDOR_CAPA_0_98_SIZE 21 + void aosp_do_open(struct hci_dev *hdev) { struct sk_buff *skb; + struct aosp_rp_le_get_vendor_capa *rp; + u16 version_supported; if (!hdev->aosp_capable) return; @@ -20,9 +54,54 @@ void aosp_do_open(struct hci_dev *hdev) /* LE Get Vendor Capabilities Command */ skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x153), 0, NULL, HCI_CMD_TIMEOUT); - if (IS_ERR(skb)) + if (IS_ERR(skb)) { + bt_dev_err(hdev, "AOSP get vendor capabilities (%ld)", + PTR_ERR(skb)); return; + } + + /* A basic length check */ + if (skb->len < VENDOR_CAPA_BASE_SIZE) + goto length_error; + + rp = (struct aosp_rp_le_get_vendor_capa *)skb->data; + + version_supported = le16_to_cpu(rp->version_supported); + /* AOSP displays the verion number like v0.98, v1.00, etc. */ + bt_dev_info(hdev, "AOSP extensions version v%u.%02u", + version_supported >> 8, version_supported & 0xff); + + /* Do not support very old versions. */ + if (version_supported < 95) { + bt_dev_warn(hdev, "AOSP capabilities version %u too old", + version_supported); + goto done; + } + + if (version_supported < 98) { + bt_dev_warn(hdev, "AOSP quality report is not supported"); + goto done; + } + + if (skb->len < VENDOR_CAPA_0_98_SIZE) + goto length_error; + + /* The bluetooth_quality_report_support is defined at version + * v0.98. Refer to + * https://cs.android.com/android/platform/superproject/+/ + * master:system/bt/gd/hci/controller.cc;l=477 + */ + if (rp->bluetooth_quality_report_support) { + hdev->aosp_quality_report = true; + bt_dev_info(hdev, "AOSP quality report is supported"); + } + + goto done; + +length_error: + bt_dev_err(hdev, "AOSP capabilities length %d too short", skb->len); +done: kfree_skb(skb); } @@ -33,3 +112,90 @@ void aosp_do_close(struct hci_dev *hdev) bt_dev_dbg(hdev, "Cleanup of AOSP extension"); } + +/* BQR command */ +#define BQR_OPCODE hci_opcode_pack(0x3f, 0x015e) + +/* BQR report action */ +#define REPORT_ACTION_ADD 0x00 +#define REPORT_ACTION_DELETE 0x01 +#define REPORT_ACTION_CLEAR 0x02 + +/* BQR event masks */ +#define QUALITY_MONITORING BIT(0) +#define APPRAOCHING_LSTO BIT(1) +#define A2DP_AUDIO_CHOPPY BIT(2) +#define SCO_VOICE_CHOPPY BIT(3) + +#define DEFAULT_BQR_EVENT_MASK (QUALITY_MONITORING | APPRAOCHING_LSTO | \ + A2DP_AUDIO_CHOPPY | SCO_VOICE_CHOPPY) + +/* Reporting at milliseconds so as not to stress the controller too much. 
+ * Range: 0 ~ 65535 ms + */ +#define DEFALUT_REPORT_INTERVAL_MS 5000 + +struct aosp_bqr_cp { + __u8 report_action; + __u32 event_mask; + __u16 min_report_interval; +} __packed; + +static int enable_quality_report(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct aosp_bqr_cp cp; + + cp.report_action = REPORT_ACTION_ADD; + cp.event_mask = DEFAULT_BQR_EVENT_MASK; + cp.min_report_interval = DEFALUT_REPORT_INTERVAL_MS; + + skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Enabling Android BQR failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + kfree_skb(skb); + return 0; +} + +static int disable_quality_report(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct aosp_bqr_cp cp = { 0 }; + + cp.report_action = REPORT_ACTION_CLEAR; + + skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Disabling Android BQR failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + kfree_skb(skb); + return 0; +} + +bool aosp_has_quality_report(struct hci_dev *hdev) +{ + return hdev->aosp_quality_report; +} + +int aosp_set_quality_report(struct hci_dev *hdev, bool enable) +{ + if (!aosp_has_quality_report(hdev)) + return -EOPNOTSUPP; + + bt_dev_dbg(hdev, "quality report enable %d", enable); + + /* Enable or disable the quality report feature. */ + if (enable) + return enable_quality_report(hdev); + else + return disable_quality_report(hdev); +} diff --git a/net/bluetooth/aosp.h b/net/bluetooth/aosp.h index 328fc6d39f70..2fd8886d51b2 100644 --- a/net/bluetooth/aosp.h +++ b/net/bluetooth/aosp.h @@ -8,9 +8,22 @@ void aosp_do_open(struct hci_dev *hdev); void aosp_do_close(struct hci_dev *hdev); +bool aosp_has_quality_report(struct hci_dev *hdev); +int aosp_set_quality_report(struct hci_dev *hdev, bool enable); + #else static inline void aosp_do_open(struct hci_dev *hdev) {} static inline void aosp_do_close(struct hci_dev *hdev) {} +static inline bool aosp_has_quality_report(struct hci_dev *hdev) +{ + return false; +} + +static inline int aosp_set_quality_report(struct hci_dev *hdev, bool enable) +{ + return -EOPNOTSUPP; +} + #endif diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 0a2d78e811cf..83eb84e8e688 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -501,9 +501,7 @@ static int __init cmtp_init(void) { BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); - cmtp_init_sockets(); - - return 0; + return cmtp_init_sockets(); } static void __exit cmtp_exit(void) diff --git a/net/bluetooth/hci_codec.c b/net/bluetooth/hci_codec.c index f0421d0edaa3..38201532f58e 100644 --- a/net/bluetooth/hci_codec.c +++ b/net/bluetooth/hci_codec.c @@ -25,9 +25,11 @@ static int hci_codec_list_add(struct list_head *list, } entry->transport = sent->transport; entry->len = len; - entry->num_caps = rp->num_caps; - if (rp->num_caps) + entry->num_caps = 0; + if (rp) { + entry->num_caps = rp->num_caps; memcpy(entry->caps, caps, len); + } list_add(&entry->list, list); return 0; @@ -58,6 +60,18 @@ static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport, __u32 len; cmd->transport = i; + + /* If Read_Codec_Capabilities command is not supported + * then just add codec to the list without caps + */ + if (!(hdev->commands[45] & 0x08)) { + hci_dev_lock(hdev); + hci_codec_list_add(&hdev->local_codecs, cmd, + NULL, NULL, 0); + hci_dev_unlock(hdev); + continue; + } + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS, 
sizeof(*cmd), cmd, HCI_CMD_TIMEOUT); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index bd669c95b9a7..cd6e1cf7e396 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -108,7 +108,7 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn) break; } - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); } static void hci_conn_cleanup(struct hci_conn *conn) @@ -900,25 +900,15 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status) hci_conn_del(conn); - /* The suspend notifier is waiting for all devices to disconnect and an - * LE connect cancel will result in an hci_le_conn_failed. Once the last - * connection is deleted, we should also wake the suspend queue to - * complete suspend operations. - */ - if (list_empty(&hdev->conn_hash.list) && - test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) { - wake_up(&hdev->suspend_wait_q); - } - /* Since we may have temporarily stopped the background scanning in * favor of connection establishment, we should restart it. */ - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); - /* Re-enable advertising in case this was a failed connection + /* Enable advertising in case this was a failed connection * attempt as a peripheral. */ - hci_req_reenable_advertising(hdev); + hci_enable_advertising(hdev); } static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) @@ -1411,7 +1401,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, conn->conn_timeout = conn_timeout; conn->conn_reason = conn_reason; - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); done: hci_conn_hold(conn); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8d33aa64846b..fdc0dcf8ee36 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -62,824 +62,6 @@ DEFINE_MUTEX(hci_cb_list_lock); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); -static int hci_reset_req(struct hci_request *req, unsigned long opt) -{ - BT_DBG("%s %ld", req->hdev->name, opt); - - /* Reset device */ - set_bit(HCI_RESET, &req->hdev->flags); - hci_req_add(req, HCI_OP_RESET, 0, NULL); - return 0; -} - -static void bredr_init(struct hci_request *req) -{ - req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; - - /* Read Local Supported Features */ - hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); - - /* Read Local Version */ - hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); - - /* Read BD Address */ - hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); -} - -static void amp_init1(struct hci_request *req) -{ - req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; - - /* Read Local Version */ - hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); - - /* Read Local Supported Commands */ - hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); - - /* Read Local AMP Info */ - hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); - - /* Read Data Blk size */ - hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); - - /* Read Flow Control Mode */ - hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL); - - /* Read Location Data */ - hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL); -} - -static int amp_init2(struct hci_request *req) -{ - /* Read Local Supported Features. Not all AMP controllers - * support this so it's placed conditionally in the second - * stage init. 
- */ - if (req->hdev->commands[14] & 0x20) - hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); - - return 0; -} - -static int hci_init1_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - BT_DBG("%s %ld", hdev->name, opt); - - /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) - hci_reset_req(req, 0); - - switch (hdev->dev_type) { - case HCI_PRIMARY: - bredr_init(req); - break; - case HCI_AMP: - amp_init1(req); - break; - default: - bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type); - break; - } - - return 0; -} - -static void bredr_setup(struct hci_request *req) -{ - __le16 param; - __u8 flt_type; - - /* Read Buffer Size (ACL mtu, max pkt, etc.) */ - hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL); - - /* Read Class of Device */ - hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); - - /* Read Local Name */ - hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL); - - /* Read Voice Setting */ - hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL); - - /* Read Number of Supported IAC */ - hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL); - - /* Read Current IAC LAP */ - hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL); - - /* Clear Event Filters */ - flt_type = HCI_FLT_CLEAR_ALL; - hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type); - - /* Connection accept timeout ~20 secs */ - param = cpu_to_le16(0x7d00); - hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); -} - -static void le_setup(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - - /* Read LE Buffer Size */ - hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); - - /* Read LE Local Supported Features */ - hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); - - /* Read LE Supported States */ - hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); - - /* LE-only controllers have LE implicitly enabled */ - if (!lmp_bredr_capable(hdev)) - hci_dev_set_flag(hdev, HCI_LE_ENABLED); -} - -static void hci_setup_event_mask(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - - /* The second byte is 0xff instead of 0x9f (two reserved bits - * disabled) since a Broadcom 1.2 dongle doesn't respond to the - * command otherwise. - */ - u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; - - /* CSR 1.1 dongles does not accept any bitfield so don't try to set - * any event mask for pre 1.2 devices. - */ - if (hdev->hci_ver < BLUETOOTH_VER_1_2) - return; - - if (lmp_bredr_capable(hdev)) { - events[4] |= 0x01; /* Flow Specification Complete */ - } else { - /* Use a different default for LE-only devices */ - memset(events, 0, sizeof(events)); - events[1] |= 0x20; /* Command Complete */ - events[1] |= 0x40; /* Command Status */ - events[1] |= 0x80; /* Hardware Error */ - - /* If the controller supports the Disconnect command, enable - * the corresponding event. In addition enable packet flow - * control related events. - */ - if (hdev->commands[0] & 0x20) { - events[0] |= 0x10; /* Disconnection Complete */ - events[2] |= 0x04; /* Number of Completed Packets */ - events[3] |= 0x02; /* Data Buffer Overflow */ - } - - /* If the controller supports the Read Remote Version - * Information command, enable the corresponding event. 
- */ - if (hdev->commands[2] & 0x80) - events[1] |= 0x08; /* Read Remote Version Information - * Complete - */ - - if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { - events[0] |= 0x80; /* Encryption Change */ - events[5] |= 0x80; /* Encryption Key Refresh Complete */ - } - } - - if (lmp_inq_rssi_capable(hdev) || - test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) - events[4] |= 0x02; /* Inquiry Result with RSSI */ - - if (lmp_ext_feat_capable(hdev)) - events[4] |= 0x04; /* Read Remote Extended Features Complete */ - - if (lmp_esco_capable(hdev)) { - events[5] |= 0x08; /* Synchronous Connection Complete */ - events[5] |= 0x10; /* Synchronous Connection Changed */ - } - - if (lmp_sniffsubr_capable(hdev)) - events[5] |= 0x20; /* Sniff Subrating */ - - if (lmp_pause_enc_capable(hdev)) - events[5] |= 0x80; /* Encryption Key Refresh Complete */ - - if (lmp_ext_inq_capable(hdev)) - events[5] |= 0x40; /* Extended Inquiry Result */ - - if (lmp_no_flush_capable(hdev)) - events[7] |= 0x01; /* Enhanced Flush Complete */ - - if (lmp_lsto_capable(hdev)) - events[6] |= 0x80; /* Link Supervision Timeout Changed */ - - if (lmp_ssp_capable(hdev)) { - events[6] |= 0x01; /* IO Capability Request */ - events[6] |= 0x02; /* IO Capability Response */ - events[6] |= 0x04; /* User Confirmation Request */ - events[6] |= 0x08; /* User Passkey Request */ - events[6] |= 0x10; /* Remote OOB Data Request */ - events[6] |= 0x20; /* Simple Pairing Complete */ - events[7] |= 0x04; /* User Passkey Notification */ - events[7] |= 0x08; /* Keypress Notification */ - events[7] |= 0x10; /* Remote Host Supported - * Features Notification - */ - } - - if (lmp_le_capable(hdev)) - events[7] |= 0x20; /* LE Meta-Event */ - - hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events); -} - -static int hci_init2_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - if (hdev->dev_type == HCI_AMP) - return amp_init2(req); - - if (lmp_bredr_capable(hdev)) - bredr_setup(req); - else - hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); - - if (lmp_le_capable(hdev)) - le_setup(req); - - /* All Bluetooth 1.2 and later controllers should support the - * HCI command for reading the local supported commands. - * - * Unfortunately some controllers indicate Bluetooth 1.2 support, - * but do not have support for this command. If that is the case, - * the driver can quirk the behavior and skip reading the local - * supported commands. - */ - if (hdev->hci_ver > BLUETOOTH_VER_1_1 && - !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) - hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); - - if (lmp_ssp_capable(hdev)) { - /* When SSP is available, then the host features page - * should also be available as well. However some - * controllers list the max_page as 0 as long as SSP - * has not been enabled. To achieve proper debugging - * output, force the minimum max_page to 1 at least. - */ - hdev->max_page = 0x01; - - if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { - u8 mode = 0x01; - - hci_req_add(req, HCI_OP_WRITE_SSP_MODE, - sizeof(mode), &mode); - } else { - struct hci_cp_write_eir cp; - - memset(hdev->eir, 0, sizeof(hdev->eir)); - memset(&cp, 0, sizeof(cp)); - - hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); - } - } - - if (lmp_inq_rssi_capable(hdev) || - test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) { - u8 mode; - - /* If Extended Inquiry Result events are supported, then - * they are clearly preferred over Inquiry Result with RSSI - * events. 
- */ - mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; - - hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); - } - - if (lmp_inq_tx_pwr_capable(hdev)) - hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); - - if (lmp_ext_feat_capable(hdev)) { - struct hci_cp_read_local_ext_features cp; - - cp.page = 0x01; - hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, - sizeof(cp), &cp); - } - - if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { - u8 enable = 1; - hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), - &enable); - } - - return 0; -} - -static void hci_setup_link_policy(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_def_link_policy cp; - u16 link_policy = 0; - - if (lmp_rswitch_capable(hdev)) - link_policy |= HCI_LP_RSWITCH; - if (lmp_hold_capable(hdev)) - link_policy |= HCI_LP_HOLD; - if (lmp_sniff_capable(hdev)) - link_policy |= HCI_LP_SNIFF; - if (lmp_park_capable(hdev)) - link_policy |= HCI_LP_PARK; - - cp.policy = cpu_to_le16(link_policy); - hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); -} - -static void hci_set_le_support(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_le_host_supported cp; - - /* LE-only devices do not support explicit enablement */ - if (!lmp_bredr_capable(hdev)) - return; - - memset(&cp, 0, sizeof(cp)); - - if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { - cp.le = 0x01; - cp.simul = 0x00; - } - - if (cp.le != lmp_host_le_capable(hdev)) - hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), - &cp); -} - -static void hci_set_event_mask_page_2(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; - bool changed = false; - - /* If Connectionless Peripheral Broadcast central role is supported - * enable all necessary events for it. - */ - if (lmp_cpb_central_capable(hdev)) { - events[1] |= 0x40; /* Triggered Clock Capture */ - events[1] |= 0x80; /* Synchronization Train Complete */ - events[2] |= 0x10; /* Peripheral Page Response Timeout */ - events[2] |= 0x20; /* CPB Channel Map Change */ - changed = true; - } - - /* If Connectionless Peripheral Broadcast peripheral role is supported - * enable all necessary events for it. - */ - if (lmp_cpb_peripheral_capable(hdev)) { - events[2] |= 0x01; /* Synchronization Train Received */ - events[2] |= 0x02; /* CPB Receive */ - events[2] |= 0x04; /* CPB Timeout */ - events[2] |= 0x08; /* Truncated Page Complete */ - changed = true; - } - - /* Enable Authenticated Payload Timeout Expired event if supported */ - if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { - events[2] |= 0x80; - changed = true; - } - - /* Some Broadcom based controllers indicate support for Set Event - * Mask Page 2 command, but then actually do not support it. Since - * the default value is all bits set to zero, the command is only - * required if the event mask has to be changed. In case no change - * to the event mask is needed, skip this command. 
- */ - if (changed) - hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, - sizeof(events), events); -} - -static int hci_init3_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - u8 p; - - hci_setup_event_mask(req); - - if (hdev->commands[6] & 0x20 && - !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) { - struct hci_cp_read_stored_link_key cp; - - bacpy(&cp.bdaddr, BDADDR_ANY); - cp.read_all = 0x01; - hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp); - } - - if (hdev->commands[5] & 0x10) - hci_setup_link_policy(req); - - if (hdev->commands[8] & 0x01) - hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); - - if (hdev->commands[18] & 0x04 && - !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) - hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL); - - /* Some older Broadcom based Bluetooth 1.2 controllers do not - * support the Read Page Scan Type command. Check support for - * this command in the bit mask of supported commands. - */ - if (hdev->commands[13] & 0x01) - hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL); - - if (lmp_le_capable(hdev)) { - u8 events[8]; - - memset(events, 0, sizeof(events)); - - if (hdev->le_features[0] & HCI_LE_ENCRYPTION) - events[0] |= 0x10; /* LE Long Term Key Request */ - - /* If controller supports the Connection Parameters Request - * Link Layer Procedure, enable the corresponding event. - */ - if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) - events[0] |= 0x20; /* LE Remote Connection - * Parameter Request - */ - - /* If the controller supports the Data Length Extension - * feature, enable the corresponding event. - */ - if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) - events[0] |= 0x40; /* LE Data Length Change */ - - /* If the controller supports LL Privacy feature, enable - * the corresponding event. - */ - if (hdev->le_features[0] & HCI_LE_LL_PRIVACY) - events[1] |= 0x02; /* LE Enhanced Connection - * Complete - */ - - /* If the controller supports Extended Scanner Filter - * Policies, enable the corresponding event. - */ - if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) - events[1] |= 0x04; /* LE Direct Advertising - * Report - */ - - /* If the controller supports Channel Selection Algorithm #2 - * feature, enable the corresponding event. - */ - if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) - events[2] |= 0x08; /* LE Channel Selection - * Algorithm - */ - - /* If the controller supports the LE Set Scan Enable command, - * enable the corresponding advertising report event. - */ - if (hdev->commands[26] & 0x08) - events[0] |= 0x02; /* LE Advertising Report */ - - /* If the controller supports the LE Create Connection - * command, enable the corresponding event. - */ - if (hdev->commands[26] & 0x10) - events[0] |= 0x01; /* LE Connection Complete */ - - /* If the controller supports the LE Connection Update - * command, enable the corresponding event. - */ - if (hdev->commands[27] & 0x04) - events[0] |= 0x04; /* LE Connection Update - * Complete - */ - - /* If the controller supports the LE Read Remote Used Features - * command, enable the corresponding event. - */ - if (hdev->commands[27] & 0x20) - events[0] |= 0x08; /* LE Read Remote Used - * Features Complete - */ - - /* If the controller supports the LE Read Local P-256 - * Public Key command, enable the corresponding event. 
- */ - if (hdev->commands[34] & 0x02) - events[0] |= 0x80; /* LE Read Local P-256 - * Public Key Complete - */ - - /* If the controller supports the LE Generate DHKey - * command, enable the corresponding event. - */ - if (hdev->commands[34] & 0x04) - events[1] |= 0x01; /* LE Generate DHKey Complete */ - - /* If the controller supports the LE Set Default PHY or - * LE Set PHY commands, enable the corresponding event. - */ - if (hdev->commands[35] & (0x20 | 0x40)) - events[1] |= 0x08; /* LE PHY Update Complete */ - - /* If the controller supports LE Set Extended Scan Parameters - * and LE Set Extended Scan Enable commands, enable the - * corresponding event. - */ - if (use_ext_scan(hdev)) - events[1] |= 0x10; /* LE Extended Advertising - * Report - */ - - /* If the controller supports the LE Extended Advertising - * command, enable the corresponding event. - */ - if (ext_adv_capable(hdev)) - events[2] |= 0x02; /* LE Advertising Set - * Terminated - */ - - hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), - events); - - /* Read LE Advertising Channel TX Power */ - if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { - /* HCI TS spec forbids mixing of legacy and extended - * advertising commands wherein READ_ADV_TX_POWER is - * also included. So do not call it if extended adv - * is supported otherwise controller will return - * COMMAND_DISALLOWED for extended commands. - */ - hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); - } - - if (hdev->commands[38] & 0x80) { - /* Read LE Min/Max Tx Power*/ - hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER, - 0, NULL); - } - - if (hdev->commands[26] & 0x40) { - /* Read LE Accept List Size */ - hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, - 0, NULL); - } - - if (hdev->commands[26] & 0x80) { - /* Clear LE Accept List */ - hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL); - } - - if (hdev->commands[34] & 0x40) { - /* Read LE Resolving List Size */ - hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE, - 0, NULL); - } - - if (hdev->commands[34] & 0x20) { - /* Clear LE Resolving List */ - hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL); - } - - if (hdev->commands[35] & 0x04) { - __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout); - - /* Set RPA timeout */ - hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2, - &rpa_timeout); - } - - if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { - /* Read LE Maximum Data Length */ - hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); - - /* Read LE Suggested Default Data Length */ - hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL); - } - - if (ext_adv_capable(hdev)) { - /* Read LE Number of Supported Advertising Sets */ - hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, - 0, NULL); - } - - hci_set_le_support(req); - } - - /* Read features beyond page 1 if available */ - for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { - struct hci_cp_read_local_ext_features cp; - - cp.page = p; - hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, - sizeof(cp), &cp); - } - - return 0; -} - -static int hci_init4_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - /* Some Broadcom based Bluetooth controllers do not support the - * Delete Stored Link Key command. They are clearly indicating its - * absence in the bit mask of supported commands. - * - * Check the supported commands and only if the command is marked - * as supported send it. 
If not supported assume that the controller - * does not have actual support for stored link keys which makes this - * command redundant anyway. - * - * Some controllers indicate that they support handling deleting - * stored link keys, but they don't. The quirk lets a driver - * just disable this command. - */ - if (hdev->commands[6] & 0x80 && - !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) { - struct hci_cp_delete_stored_link_key cp; - - bacpy(&cp.bdaddr, BDADDR_ANY); - cp.delete_all = 0x01; - hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, - sizeof(cp), &cp); - } - - /* Set event mask page 2 if the HCI command for it is supported */ - if (hdev->commands[22] & 0x04) - hci_set_event_mask_page_2(req); - - /* Read local pairing options if the HCI command is supported */ - if (hdev->commands[41] & 0x08) - hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL); - - /* Get MWS transport configuration if the HCI command is supported */ - if (hdev->commands[30] & 0x08) - hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL); - - /* Check for Synchronization Train support */ - if (lmp_sync_train_capable(hdev)) - hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); - - /* Enable Secure Connections if supported and configured */ - if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && - bredr_sc_enabled(hdev)) { - u8 support = 0x01; - - hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, - sizeof(support), &support); - } - - /* Set erroneous data reporting if supported to the wideband speech - * setting value - */ - if (hdev->commands[18] & 0x08 && - !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) { - bool enabled = hci_dev_test_flag(hdev, - HCI_WIDEBAND_SPEECH_ENABLED); - - if (enabled != - (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) { - struct hci_cp_write_def_err_data_reporting cp; - - cp.err_data_reporting = enabled ? - ERR_DATA_REPORTING_ENABLED : - ERR_DATA_REPORTING_DISABLED; - - hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, - sizeof(cp), &cp); - } - } - - /* Set Suggested Default Data Length to maximum if supported */ - if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { - struct hci_cp_le_write_def_data_len cp; - - cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); - cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); - hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp); - } - - /* Set Default PHY parameters if command is supported */ - if (hdev->commands[35] & 0x20) { - struct hci_cp_le_set_default_phy cp; - - cp.all_phys = 0x00; - cp.tx_phys = hdev->le_tx_def_phys; - cp.rx_phys = hdev->le_rx_def_phys; - - hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp); - } - - return 0; -} - -static int __hci_init(struct hci_dev *hdev) -{ - int err; - - err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - if (hci_dev_test_flag(hdev, HCI_SETUP)) - hci_debugfs_create_basic(hdev); - - err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode - * BR/EDR/LE type controllers. AMP controllers only need the - * first two stages of init. 
- */ - if (hdev->dev_type != HCI_PRIMARY) - return 0; - - err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - /* Read local codec list if the HCI command is supported */ - if (hdev->commands[45] & 0x04) - hci_read_supported_codecs_v2(hdev); - else if (hdev->commands[29] & 0x20) - hci_read_supported_codecs(hdev); - - /* This function is only called when the controller is actually in - * configured state. When the controller is marked as unconfigured, - * this initialization procedure is not run. - * - * It means that it is possible that a controller runs through its - * setup phase and then discovers missing settings. If that is the - * case, then this function will not be called. It then will only - * be called during the config phase. - * - * So only when in setup phase or config phase, create the debugfs - * entries and register the SMP channels. - */ - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) - return 0; - - hci_debugfs_create_common(hdev); - - if (lmp_bredr_capable(hdev)) - hci_debugfs_create_bredr(hdev); - - if (lmp_le_capable(hdev)) - hci_debugfs_create_le(hdev); - - return 0; -} - -static int hci_init0_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - BT_DBG("%s %ld", hdev->name, opt); - - /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) - hci_reset_req(req, 0); - - /* Read Local Version */ - hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); - - /* Read BD Address */ - if (hdev->set_bdaddr) - hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); - - return 0; -} - -static int __hci_unconf_init(struct hci_dev *hdev) -{ - int err; - - if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) - return 0; - - err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - if (hci_dev_test_flag(hdev, HCI_SETUP)) - hci_debugfs_create_basic(hdev); - - return 0; -} - static int hci_scan_req(struct hci_request *req, unsigned long opt) { __u8 scan = opt; @@ -975,7 +157,7 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state) switch (state) { case DISCOVERY_STOPPED: - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); if (old_state != DISCOVERY_STARTING) mgmt_discovering(hdev, 0); @@ -1289,32 +471,6 @@ done: return err; } -/** - * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address - * (BD_ADDR) for a HCI device from - * a firmware node property. - * @hdev: The HCI device - * - * Search the firmware node for 'local-bd-address'. - * - * All-zero BD addresses are rejected, because those could be properties - * that exist in the firmware tables, but were not updated by the firmware. For - * example, the DTS could define 'local-bd-address', with zero BD addresses. 
- */ -static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) -{ - struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); - bdaddr_t ba; - int ret; - - ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", - (u8 *)&ba, sizeof(ba)); - if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) - return; - - bacpy(&hdev->public_addr, &ba); -} - static int hci_dev_do_open(struct hci_dev *hdev) { int ret = 0; @@ -1323,205 +479,8 @@ static int hci_dev_do_open(struct hci_dev *hdev) hci_req_sync_lock(hdev); - if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { - ret = -ENODEV; - goto done; - } - - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) { - /* Check for rfkill but allow the HCI setup stage to - * proceed (which in itself doesn't cause any RF activity). - */ - if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { - ret = -ERFKILL; - goto done; - } - - /* Check for valid public address or a configured static - * random address, but let the HCI setup proceed to - * be able to determine if there is a public address - * or not. - * - * In case of user channel usage, it is not important - * if a public address or static random address is - * available. - * - * This check is only valid for BR/EDR controllers - * since AMP controllers do not have an address. - */ - if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hdev->dev_type == HCI_PRIMARY && - !bacmp(&hdev->bdaddr, BDADDR_ANY) && - !bacmp(&hdev->static_addr, BDADDR_ANY)) { - ret = -EADDRNOTAVAIL; - goto done; - } - } - - if (test_bit(HCI_UP, &hdev->flags)) { - ret = -EALREADY; - goto done; - } - - if (hdev->open(hdev)) { - ret = -EIO; - goto done; - } - - set_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_OPEN); - - atomic_set(&hdev->cmd_cnt, 1); - set_bit(HCI_INIT, &hdev->flags); - - if (hci_dev_test_flag(hdev, HCI_SETUP) || - test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { - bool invalid_bdaddr; - - hci_sock_dev_event(hdev, HCI_DEV_SETUP); - - if (hdev->setup) - ret = hdev->setup(hdev); - - /* The transport driver can set the quirk to mark the - * BD_ADDR invalid before creating the HCI device or in - * its setup callback. - */ - invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, - &hdev->quirks); - - if (ret) - goto setup_failed; - - if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) { - if (!bacmp(&hdev->public_addr, BDADDR_ANY)) - hci_dev_get_bd_addr_from_property(hdev); - - if (bacmp(&hdev->public_addr, BDADDR_ANY) && - hdev->set_bdaddr) { - ret = hdev->set_bdaddr(hdev, - &hdev->public_addr); - - /* If setting of the BD_ADDR from the device - * property succeeds, then treat the address - * as valid even if the invalid BD_ADDR - * quirk indicates otherwise. - */ - if (!ret) - invalid_bdaddr = false; - } - } - -setup_failed: - /* The transport driver can set these quirks before - * creating the HCI device or in its setup callback. - * - * For the invalid BD_ADDR quirk it is possible that - * it becomes a valid address if the bootloader does - * provide it (see above). - * - * In case any of them is set, the controller has to - * start up as unconfigured. - */ - if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || - invalid_bdaddr) - hci_dev_set_flag(hdev, HCI_UNCONFIGURED); - - /* For an unconfigured controller it is required to - * read at least the version information provided by - * the Read Local Version Information command. 
- * - * If the set_bdaddr driver callback is provided, then - * also the original Bluetooth public device address - * will be read using the Read BD Address command. - */ - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) - ret = __hci_unconf_init(hdev); - } - - if (hci_dev_test_flag(hdev, HCI_CONFIG)) { - /* If public address change is configured, ensure that - * the address gets programmed. If the driver does not - * support changing the public address, fail the power - * on procedure. - */ - if (bacmp(&hdev->public_addr, BDADDR_ANY) && - hdev->set_bdaddr) - ret = hdev->set_bdaddr(hdev, &hdev->public_addr); - else - ret = -EADDRNOTAVAIL; - } - - if (!ret) { - if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { - ret = __hci_init(hdev); - if (!ret && hdev->post_init) - ret = hdev->post_init(hdev); - } - } - - /* If the HCI Reset command is clearing all diagnostic settings, - * then they need to be reprogrammed after the init procedure - * completed. - */ - if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) - ret = hdev->set_diag(hdev, true); - - msft_do_open(hdev); - aosp_do_open(hdev); - - clear_bit(HCI_INIT, &hdev->flags); - - if (!ret) { - hci_dev_hold(hdev); - hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); - hci_adv_instances_set_rpa_expired(hdev, true); - set_bit(HCI_UP, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_UP); - hci_leds_update_powered(hdev, true); - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG) && - !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_MGMT) && - hdev->dev_type == HCI_PRIMARY) { - ret = __hci_req_hci_power_on(hdev); - mgmt_power_on(hdev, ret); - } - } else { - /* Init failed, cleanup */ - flush_work(&hdev->tx_work); - - /* Since hci_rx_work() is possible to awake new cmd_work - * it should be flushed first to avoid unexpected call of - * hci_cmd_work() - */ - flush_work(&hdev->rx_work); - flush_work(&hdev->cmd_work); - - skb_queue_purge(&hdev->cmd_q); - skb_queue_purge(&hdev->rx_q); - - if (hdev->flush) - hdev->flush(hdev); - - if (hdev->sent_cmd) { - kfree_skb(hdev->sent_cmd); - hdev->sent_cmd = NULL; - } - - clear_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_CLOSE); - - hdev->close(hdev); - hdev->flags &= BIT(HCI_RAW); - } + ret = hci_dev_open_sync(hdev); -done: hci_req_sync_unlock(hdev); return ret; } @@ -1583,155 +542,18 @@ done: return err; } -/* This function requires the caller holds hdev->lock */ -static void hci_pend_le_actions_clear(struct hci_dev *hdev) -{ - struct hci_conn_params *p; - - list_for_each_entry(p, &hdev->le_conn_params, list) { - if (p->conn) { - hci_conn_drop(p->conn); - hci_conn_put(p->conn); - p->conn = NULL; - } - list_del_init(&p->action); - } - - BT_DBG("All LE pending actions cleared"); -} - int hci_dev_do_close(struct hci_dev *hdev) { - bool auto_off; - int err = 0; + int err; BT_DBG("%s %p", hdev->name, hdev); - cancel_delayed_work(&hdev->power_off); - cancel_delayed_work(&hdev->ncmd_timer); - - hci_request_cancel_all(hdev); hci_req_sync_lock(hdev); - if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - test_bit(HCI_UP, &hdev->flags)) { - /* Execute vendor specific shutdown routine */ - if (hdev->shutdown) - err = hdev->shutdown(hdev); - } - - if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { 
- cancel_delayed_work_sync(&hdev->cmd_timer); - hci_req_sync_unlock(hdev); - return err; - } - - hci_leds_update_powered(hdev, false); - - /* Flush RX and TX works */ - flush_work(&hdev->tx_work); - flush_work(&hdev->rx_work); - - if (hdev->discov_timeout > 0) { - hdev->discov_timeout = 0; - hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); - hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); - } - - if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) - cancel_delayed_work(&hdev->service_cache); - - if (hci_dev_test_flag(hdev, HCI_MGMT)) { - struct adv_info *adv_instance; - - cancel_delayed_work_sync(&hdev->rpa_expired); - - list_for_each_entry(adv_instance, &hdev->adv_instances, list) - cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); - } - - /* Avoid potential lockdep warnings from the *_flush() calls by - * ensuring the workqueue is empty up front. - */ - drain_workqueue(hdev->workqueue); - - hci_dev_lock(hdev); - - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - - auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); - - if (!auto_off && hdev->dev_type == HCI_PRIMARY && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_MGMT)) - __mgmt_power_off(hdev); - - hci_inquiry_cache_flush(hdev); - hci_pend_le_actions_clear(hdev); - hci_conn_hash_flush(hdev); - hci_dev_unlock(hdev); - - smp_unregister(hdev); - - hci_sock_dev_event(hdev, HCI_DEV_DOWN); - - aosp_do_close(hdev); - msft_do_close(hdev); - - if (hdev->flush) - hdev->flush(hdev); - - /* Reset device */ - skb_queue_purge(&hdev->cmd_q); - atomic_set(&hdev->cmd_cnt, 1); - if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && - !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { - set_bit(HCI_INIT, &hdev->flags); - __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL); - clear_bit(HCI_INIT, &hdev->flags); - } - - /* flush cmd work */ - flush_work(&hdev->cmd_work); - - /* Drop queues */ - skb_queue_purge(&hdev->rx_q); - skb_queue_purge(&hdev->cmd_q); - skb_queue_purge(&hdev->raw_q); - - /* Drop last sent command */ - if (hdev->sent_cmd) { - cancel_delayed_work_sync(&hdev->cmd_timer); - kfree_skb(hdev->sent_cmd); - hdev->sent_cmd = NULL; - } - - clear_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_CLOSE); - - if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks)) - wake_up(&hdev->suspend_wait_q); - - /* After this point our queues are empty - * and no tasks are scheduled. 
*/ - hdev->close(hdev); - - /* Clear flags */ - hdev->flags &= BIT(HCI_RAW); - hci_dev_clear_volatile_flags(hdev); - - /* Controller radio is available but is currently powered down */ - hdev->amp_status = AMP_STATUS_POWERED_DOWN; - - memset(hdev->eir, 0, sizeof(hdev->eir)); - memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); - bacpy(&hdev->random_addr, BDADDR_ANY); - hci_codec_list_clear(&hdev->local_codecs); + err = hci_dev_close_sync(hdev); hci_req_sync_unlock(hdev); - hci_dev_put(hdev); return err; } @@ -1787,7 +609,7 @@ static int hci_dev_do_reset(struct hci_dev *hdev) atomic_set(&hdev->cmd_cnt, 1); hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; - ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL); + ret = hci_reset_sync(hdev); hci_req_sync_unlock(hdev); return ret; @@ -1850,7 +672,7 @@ done: return ret; } -static void hci_update_scan_state(struct hci_dev *hdev, u8 scan) +static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan) { bool conn_changed, discov_changed; @@ -1951,7 +773,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) * get correctly modified as this was a non-mgmt change. */ if (!err) - hci_update_scan_state(hdev, dr.dev_opt); + hci_update_passive_scan_state(hdev, dr.dev_opt); break; case HCISETLINKPOL: @@ -2133,9 +955,7 @@ static void hci_power_on(struct work_struct *work) hci_dev_test_flag(hdev, HCI_MGMT) && hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { cancel_delayed_work(&hdev->power_off); - hci_req_sync_lock(hdev); - err = __hci_req_hci_power_on(hdev); - hci_req_sync_unlock(hdev); + err = hci_powered_update_sync(hdev); mgmt_power_on(hdev, err); return; } @@ -3096,7 +1916,7 @@ bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err); /* Message was not forwarded to controller - not an error */ return false; @@ -3160,7 +1980,7 @@ bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err) pending = hci_remove_adv_monitor(hdev, monitor, handle, err); if (!*err && !pending) - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending", hdev->name, handle, *err, pending ? "" : "not "); @@ -3192,7 +2012,7 @@ bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err) } if (update) - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending", hdev->name, *err, pending ? 
"" : "not "); @@ -3486,7 +2306,7 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) hci_conn_params_free(params); - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); BT_DBG("addr %pMR (type %u)", addr, addr_type); } @@ -3554,61 +2374,6 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, } } -static void hci_suspend_clear_tasks(struct hci_dev *hdev) -{ - int i; - - for (i = 0; i < __SUSPEND_NUM_TASKS; i++) - clear_bit(i, hdev->suspend_tasks); - - wake_up(&hdev->suspend_wait_q); -} - -static int hci_suspend_wait_event(struct hci_dev *hdev) -{ -#define WAKE_COND \ - (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \ - __SUSPEND_NUM_TASKS) - - int i; - int ret = wait_event_timeout(hdev->suspend_wait_q, - WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT); - - if (ret == 0) { - bt_dev_err(hdev, "Timed out waiting for suspend events"); - for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) { - if (test_bit(i, hdev->suspend_tasks)) - bt_dev_err(hdev, "Suspend timeout bit: %d", i); - clear_bit(i, hdev->suspend_tasks); - } - - ret = -ETIMEDOUT; - } else { - ret = 0; - } - - return ret; -} - -static void hci_prepare_suspend(struct work_struct *work) -{ - struct hci_dev *hdev = - container_of(work, struct hci_dev, suspend_prepare); - - hci_dev_lock(hdev); - hci_req_prepare_suspend(hdev, hdev->suspend_state_next); - hci_dev_unlock(hdev); -} - -static int hci_change_suspend_state(struct hci_dev *hdev, - enum suspended_state next) -{ - hdev->suspend_state_next = next; - set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); - queue_work(hdev->req_workqueue, &hdev->suspend_prepare); - return hci_suspend_wait_event(hdev); -} - static void hci_clear_wake_reason(struct hci_dev *hdev) { hci_dev_lock(hdev); @@ -3745,7 +2510,8 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) INIT_WORK(&hdev->tx_work, hci_tx_work); INIT_WORK(&hdev->power_on, hci_power_on); INIT_WORK(&hdev->error_reset, hci_error_reset); - INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend); + + hci_cmd_sync_init(hdev); INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); @@ -3754,7 +2520,6 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) skb_queue_head_init(&hdev->raw_q); init_waitqueue_head(&hdev->req_wait_q); - init_waitqueue_head(&hdev->suspend_wait_q); INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); @@ -3882,6 +2647,7 @@ int hci_register_dev(struct hci_dev *hdev) return id; err_wqueue: + debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: @@ -3904,11 +2670,10 @@ void hci_unregister_dev(struct hci_dev *hdev) cancel_work_sync(&hdev->power_on); - if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { - hci_suspend_clear_tasks(hdev); + hci_cmd_sync_clear(hdev); + + if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) unregister_pm_notifier(&hdev->suspend_notifier); - cancel_work_sync(&hdev->suspend_prepare); - } msft_unregister(hdev); @@ -3975,7 +2740,6 @@ EXPORT_SYMBOL(hci_release_dev); int hci_suspend_dev(struct hci_dev *hdev) { int ret; - u8 state = BT_RUNNING; bt_dev_dbg(hdev, ""); @@ -3984,40 +2748,17 @@ int hci_suspend_dev(struct hci_dev *hdev) hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; - /* If powering down, wait for completion. 
*/ - if (mgmt_powering_down(hdev)) { - set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks); - ret = hci_suspend_wait_event(hdev); - if (ret) - goto done; - } - - /* Suspend consists of two actions: - * - First, disconnect everything and make the controller not - * connectable (disabling scanning) - * - Second, program event filter/accept list and enable scan - */ - ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT); - if (ret) - goto clear; - - state = BT_SUSPEND_DISCONNECT; + /* If powering down don't attempt to suspend */ + if (mgmt_powering_down(hdev)) + return 0; - /* Only configure accept list if device may wakeup. */ - if (hdev->wakeup && hdev->wakeup(hdev)) { - ret = hci_change_suspend_state(hdev, BT_SUSPEND_CONFIGURE_WAKE); - if (!ret) - state = BT_SUSPEND_CONFIGURE_WAKE; - } + hci_req_sync_lock(hdev); + ret = hci_suspend_sync(hdev); + hci_req_sync_unlock(hdev); -clear: hci_clear_wake_reason(hdev); - mgmt_suspending(hdev, state); + mgmt_suspending(hdev, hdev->suspend_state); -done: - /* We always allow suspend even if suspend preparation failed and - * attempt to recover in resume. - */ hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); return ret; } @@ -4039,10 +2780,12 @@ int hci_resume_dev(struct hci_dev *hdev) if (mgmt_powering_down(hdev)) return 0; - ret = hci_change_suspend_state(hdev, BT_RUNNING); + hci_req_sync_lock(hdev); + ret = hci_resume_sync(hdev); + hci_req_sync_unlock(hdev); mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, - hdev->wake_addr_type); + hdev->wake_addr_type); hci_sock_dev_event(hdev, HCI_DEV_RESUME); return ret; @@ -4270,25 +3013,6 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; } -/* Send HCI command and wait for command complete event */ -struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param, u32 timeout) -{ - struct sk_buff *skb; - - if (!test_bit(HCI_UP, &hdev->flags)) - return ERR_PTR(-ENETDOWN); - - bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); - - hci_req_sync_lock(hdev); - skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); - hci_req_sync_unlock(hdev); - - return skb; -} -EXPORT_SYMBOL(hci_cmd_sync); - /* Send ACL data */ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) { diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 7d0db1ca1248..efc5458b1345 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -545,9 +545,7 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) hdev->features[1][0] &= ~LMP_HOST_SSP; } - if (hci_dev_test_flag(hdev, HCI_MGMT)) - mgmt_ssp_enable_complete(hdev, sent->mode, status); - else if (!status) { + if (!status) { if (sent->mode) hci_dev_set_flag(hdev, HCI_SSP_ENABLED); else @@ -1239,6 +1237,55 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, hci_dev_unlock(hdev); } +static void hci_cc_le_remove_adv_set(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *)skb->data); + u8 *instance; + int err; + + if (status) + return; + + instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET); + if (!instance) + return; + + hci_dev_lock(hdev); + + err = hci_remove_adv_instance(hdev, *instance); + if (!err) + mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev, + *instance); + + hci_dev_unlock(hdev); +} + +static void hci_cc_le_clear_adv_sets(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *)skb->data); + struct adv_info *adv, *n; + int 
err; + + if (status) + return; + + if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS)) + return; + + hci_dev_lock(hdev); + + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + u8 instance = adv->instance; + + err = hci_remove_adv_instance(hdev, instance); + if (!err) + mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), + hdev, instance); + } + + hci_dev_unlock(hdev); +} + static void hci_cc_le_read_transmit_power(struct hci_dev *hdev, struct sk_buff *skb) { @@ -1326,8 +1373,10 @@ static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, &conn->le_conn_timeout, conn->conn_timeout); } else { - if (adv) { - adv->enabled = false; + if (cp->num_of_sets) { + if (adv) + adv->enabled = false; + /* If just one instance was disabled check if there are * any other instance enabled before clearing HCI_LE_ADV */ @@ -1463,16 +1512,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we * interrupted scanning due to a connect request. Mark - * therefore discovery as stopped. If this was not - * because of a connect request advertising might have - * been disabled because of active scanning, so - * re-enable it again if necessary. + * therefore discovery as stopped. */ if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) && - hdev->discovery.state == DISCOVERY_FINDING) - hci_req_reenable_advertising(hdev); break; @@ -2371,9 +2414,14 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) { struct hci_cp_disconnect *cp; + struct hci_conn_params *params; struct hci_conn *conn; + bool mgmt_conn; - if (!status) + /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended + * otherwise cleanup the connection immediately. + */ + if (!status && !hdev->suspended) return; cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); @@ -2383,23 +2431,60 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); - if (conn) { + if (!conn) + goto unlock; + + if (status) { mgmt_disconnect_failed(hdev, &conn->dst, conn->type, conn->dst_type, status); if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { hdev->cur_adv_instance = conn->adv_instance; - hci_req_reenable_advertising(hdev); + hci_enable_advertising(hdev); } - /* If the disconnection failed for any reason, the upper layer - * does not retry to disconnect in current implementation. - * Hence, we need to do some basic cleanup here and re-enable - * advertising if necessary. 
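hci_cc_le_clear_adv_sets() above walks hdev->adv_instances with list_for_each_entry_safe() precisely because hci_remove_adv_instance() deletes the entry being visited. A user-space sketch of the same remove-while-iterating concern, using a plain singly linked list instead of the kernel's list helpers:

#include <stdlib.h>

struct node {
        int instance;
        struct node *next;
};

/* Unlink and free every matching node without losing the iteration
 * position: 'link' always points at the pointer that leads to the
 * node under inspection.
 */
static void remove_matching(struct node **head, int instance)
{
        struct node **link = head;

        while (*link) {
                struct node *cur = *link;

                if (cur->instance == instance) {
                        *link = cur->next;      /* unlink first ... */
                        free(cur);              /* ... then it is safe to free */
                } else {
                        link = &cur->next;
                }
        }
}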
- */ - hci_conn_del(conn); + goto done; + } + + mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); + + if (conn->type == ACL_LINK) { + if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + hci_remove_link_key(hdev, &conn->dst); + } + + params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); + if (params) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_LINK_LOSS: + if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) + break; + fallthrough; + + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + list_del_init(¶ms->action); + list_add(¶ms->action, &hdev->pend_le_conns); + break; + + default: + break; + } } + mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, + cp->reason, mgmt_conn); + + hci_disconn_cfm(conn, cp->reason); + +done: + /* If the disconnection failed for any reason, the upper layer + * does not retry to disconnect in current implementation. + * Hence, we need to do some basic cleanup here and re-enable + * advertising if necessary. + */ + hci_conn_del(conn); +unlock: hci_dev_unlock(hdev); } @@ -2977,7 +3062,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) case HCI_AUTO_CONN_ALWAYS: list_del_init(¶ms->action); list_add(¶ms->action, &hdev->pend_le_conns); - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); break; default: @@ -2987,14 +3072,6 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_disconn_cfm(conn, ev->reason); - /* The suspend notifier is waiting for all devices to disconnect so - * clear the bit from pending tasks and inform the wait queue. - */ - if (list_empty(&hdev->conn_hash.list) && - test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) { - wake_up(&hdev->suspend_wait_q); - } - /* Re-enable advertising if necessary, since it might * have been disabled by the connection. 
From the * HCI_LE_Set_Advertise_Enable command description in @@ -3007,7 +3084,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) */ if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { hdev->cur_adv_instance = conn->adv_instance; - hci_req_reenable_advertising(hdev); + hci_enable_advertising(hdev); } hci_conn_del(conn); @@ -3723,6 +3800,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_set_adv_set_random_addr(hdev, skb); break; + case HCI_OP_LE_REMOVE_ADV_SET: + hci_cc_le_remove_adv_set(hdev, skb); + break; + + case HCI_OP_LE_CLEAR_ADV_SETS: + hci_cc_le_clear_adv_sets(hdev, skb); + break; + case HCI_OP_LE_READ_TRANSMIT_POWER: hci_cc_le_read_transmit_power(hdev, skb); break; @@ -4445,7 +4530,6 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, { struct hci_ev_sync_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; - unsigned int notify_evt; BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); @@ -4517,22 +4601,18 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, } bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); - - switch (ev->air_mode) { - case 0x02: - notify_evt = HCI_NOTIFY_ENABLE_SCO_CVSD; - break; - case 0x03: - notify_evt = HCI_NOTIFY_ENABLE_SCO_TRANSP; - break; - } - /* Notify only in case of SCO over HCI transport data path which * is zero and non-zero value shall be non-HCI transport data path */ - if (conn->codec.data_path == 0) { - if (hdev->notify) - hdev->notify(hdev, notify_evt); + if (conn->codec.data_path == 0 && hdev->notify) { + switch (ev->air_mode) { + case 0x02: + hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); + break; + case 0x03: + hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); + break; + } } hci_connect_cfm(conn, ev->status); @@ -5412,7 +5492,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, } unlock: - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); hci_dev_unlock(hdev); } @@ -5441,23 +5521,30 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, le16_to_cpu(ev->interval), le16_to_cpu(ev->latency), le16_to_cpu(ev->supervision_timeout)); - - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) - hci_req_disable_address_resolution(hdev); } static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data; struct hci_conn *conn; - struct adv_info *adv; + struct adv_info *adv, *n; BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); adv = hci_find_adv_instance(hdev, ev->handle); + /* The Bluetooth Core 5.3 specification clearly states that this event + * shall not be sent when the Host disables the advertising set. So in + * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. + * + * When the Host disables an advertising set, all cleanup is done via + * its command callback and not needed to be duplicated here. 
+ */ + if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { + bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); + return; + } + if (ev->status) { if (!adv) return; @@ -5466,6 +5553,13 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_remove_adv_instance(hdev, ev->handle); mgmt_advertising_removed(NULL, hdev, ev->handle); + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + if (adv->enabled) + return; + } + + /* We are no longer advertising, clear HCI_LE_ADV */ + hci_dev_clear_flag(hdev, HCI_LE_ADV); return; } @@ -5529,8 +5623,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) return NULL; - /* Ignore if the device is blocked */ - if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type)) + /* Ignore if the device is blocked or hdev is suspended */ + if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || + hdev->suspended) return NULL; /* Most controller will fail if we try to create new connections @@ -5825,7 +5920,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) struct hci_ev_le_advertising_info *ev = ptr; s8 rssi; - if (ev->length <= HCI_MAX_AD_LENGTH) { + if (ev->length <= HCI_MAX_AD_LENGTH && + ev->data + ev->length <= skb_tail_pointer(skb)) { rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, @@ -5835,6 +5931,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) } ptr += sizeof(*ev) + ev->length + 1; + + if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) { + bt_dev_err(hdev, "Malicious advertising data. Stopping processing"); + break; + } } hci_dev_unlock(hdev); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 92611bfc0b9e..8b3205e4b23e 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -32,10 +32,6 @@ #include "msft.h" #include "eir.h" -#define HCI_REQ_DONE 0 -#define HCI_REQ_PEND 1 -#define HCI_REQ_CANCELED 2 - void hci_req_init(struct hci_request *req, struct hci_dev *hdev) { skb_queue_head_init(&req->cmd_q); @@ -101,8 +97,8 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) return req_run(req, NULL, complete); } -static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, - struct sk_buff *skb) +void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, + struct sk_buff *skb) { bt_dev_dbg(hdev, "result 0x%2.2x", result); @@ -126,70 +122,6 @@ void hci_req_sync_cancel(struct hci_dev *hdev, int err) } } -struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param, u8 event, u32 timeout) -{ - struct hci_request req; - struct sk_buff *skb; - int err = 0; - - bt_dev_dbg(hdev, ""); - - hci_req_init(&req, hdev); - - hci_req_add_ev(&req, opcode, plen, param, event); - - hdev->req_status = HCI_REQ_PEND; - - err = hci_req_run_skb(&req, hci_req_sync_complete); - if (err < 0) - return ERR_PTR(err); - - err = wait_event_interruptible_timeout(hdev->req_wait_q, - hdev->req_status != HCI_REQ_PEND, timeout); - - if (err == -ERESTARTSYS) - return ERR_PTR(-EINTR); - - switch (hdev->req_status) { - case HCI_REQ_DONE: - err = -bt_to_errno(hdev->req_result); - break; - - case HCI_REQ_CANCELED: - err = -hdev->req_result; - break; - - default: - err = -ETIMEDOUT; - break; - } - - hdev->req_status = hdev->req_result = 0; - skb = hdev->req_skb; - 
hdev->req_skb = NULL; - - bt_dev_dbg(hdev, "end: err %d", err); - - if (err < 0) { - kfree_skb(skb); - return ERR_PTR(err); - } - - if (!skb) - return ERR_PTR(-ENODATA); - - return skb; -} -EXPORT_SYMBOL(__hci_cmd_sync_ev); - -struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param, u32 timeout) -{ - return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout); -} -EXPORT_SYMBOL(__hci_cmd_sync); - /* Execute request and wait for completion. */ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, unsigned long opt), @@ -436,82 +368,6 @@ static bool __hci_update_interleaved_scan(struct hci_dev *hdev) return false; } -/* This function controls the background scanning based on hdev->pend_le_conns - * list. If there are pending LE connection we start the background scanning, - * otherwise we stop it. - * - * This function requires the caller holds hdev->lock. - */ -static void __hci_update_background_scan(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - - if (!test_bit(HCI_UP, &hdev->flags) || - test_bit(HCI_INIT, &hdev->flags) || - hci_dev_test_flag(hdev, HCI_SETUP) || - hci_dev_test_flag(hdev, HCI_CONFIG) || - hci_dev_test_flag(hdev, HCI_AUTO_OFF) || - hci_dev_test_flag(hdev, HCI_UNREGISTER)) - return; - - /* No point in doing scanning if LE support hasn't been enabled */ - if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) - return; - - /* If discovery is active don't interfere with it */ - if (hdev->discovery.state != DISCOVERY_STOPPED) - return; - - /* Reset RSSI and UUID filters when starting background scanning - * since these filters are meant for service discovery only. - * - * The Start Discovery and Start Service Discovery operations - * ensure to set proper values for RSSI threshold and UUID - * filter list. So it is safe to just reset them here. - */ - hci_discovery_filter_clear(hdev); - - bt_dev_dbg(hdev, "ADV monitoring is %s", - hci_is_adv_monitoring(hdev) ? "on" : "off"); - - if (list_empty(&hdev->pend_le_conns) && - list_empty(&hdev->pend_le_reports) && - !hci_is_adv_monitoring(hdev)) { - /* If there is no pending LE connections or devices - * to be scanned for or no ADV monitors, we should stop the - * background scanning. - */ - - /* If controller is not scanning we are done. */ - if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) - return; - - hci_req_add_le_scan_disable(req, false); - - bt_dev_dbg(hdev, "stopping background scanning"); - } else { - /* If there is at least one pending LE connection, we should - * keep the background scan running. - */ - - /* If controller is connecting, we should not start scanning - * since some controllers are not able to scan and connect at - * the same time. - */ - if (hci_lookup_le_connect(hdev)) - return; - - /* If controller is currently scanning, we stop it to ensure we - * don't miss any advertising (due to duplicates filter). 
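hci_le_adv_report_evt() above now refuses to read the trailing RSSI byte unless the reported length fits inside the skb, and stops as soon as the next record header would run past the tail. A user-space sketch of the same defensive walk over variable-length records (struct name and layout simplified, not the kernel code):

#include <stddef.h>
#include <stdint.h>

struct adv_record {             /* simplified stand-in for the event struct */
        uint8_t len;
        uint8_t data[];
};

static void parse_reports(const uint8_t *buf, size_t buf_len)
{
        const uint8_t *ptr = buf;
        const uint8_t *end = buf + buf_len;

        while (ptr + sizeof(struct adv_record) <= end) {
                const struct adv_record *rec = (const void *)ptr;

                /* header + data + trailing RSSI byte must fit */
                if (ptr + sizeof(*rec) + rec->len + 1 > end)
                        break;                  /* truncated or malicious */

                /* rec->data[rec->len] is the RSSI byte; process record here */

                ptr += sizeof(*rec) + rec->len + 1;
        }
}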
- */ - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) - hci_req_add_le_scan_disable(req, false); - - hci_req_add_le_passive_scan(req); - bt_dev_dbg(hdev, "starting background scanning"); - } -} - void __hci_req_update_name(struct hci_request *req) { struct hci_dev *hdev = req->hdev; @@ -560,9 +416,6 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) return; } - if (hdev->suspended) - set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); - if (use_ext_scan(hdev)) { struct hci_cp_le_set_ext_scan_enable cp; @@ -579,9 +432,7 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) } /* Disable address resolution */ - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { + if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { __u8 enable = 0x00; hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); @@ -600,8 +451,7 @@ static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp); - if (use_ll_privacy(req->hdev) && - hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) { + if (use_ll_privacy(req->hdev)) { struct smp_irk *irk; irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); @@ -654,8 +504,7 @@ static int add_to_accept_list(struct hci_request *req, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp); - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) { + if (use_ll_privacy(hdev)) { struct smp_irk *irk; irk = hci_find_irk_by_addr(hdev, ¶ms->addr, @@ -694,8 +543,7 @@ static u8 update_accept_list(struct hci_request *req) */ bool allow_rpa = hdev->suspended; - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + if (use_ll_privacy(hdev)) allow_rpa = true; /* Go through the current accept list programmed into the @@ -784,9 +632,7 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, return; } - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - addr_resolv) { + if (use_ll_privacy(hdev) && addr_resolv) { u8 enable = 0x01; hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); @@ -943,8 +789,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req) if (hdev->suspended) { window = hdev->le_scan_window_suspend; interval = hdev->le_scan_int_suspend; - - set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); } else if (hci_is_le_conn_scanning(hdev)) { window = hdev->le_scan_window_connect; interval = hdev->le_scan_int_connect; @@ -977,59 +821,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req) addr_resolv); } -static void hci_req_clear_event_filter(struct hci_request *req) -{ - struct hci_cp_set_event_filter f; - - if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED)) - return; - - if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) { - memset(&f, 0, sizeof(f)); - f.flt_type = HCI_FLT_CLEAR_ALL; - hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f); - } -} - -static void hci_req_set_event_filter(struct hci_request *req) -{ - struct bdaddr_list_with_flags *b; - struct hci_cp_set_event_filter f; - struct hci_dev *hdev = req->hdev; - u8 scan = SCAN_DISABLED; - bool scanning = test_bit(HCI_PSCAN, &hdev->flags); - - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) - return; - - /* Always clear event filter when starting */ - hci_req_clear_event_filter(req); - - 
list_for_each_entry(b, &hdev->accept_list, list) { - if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, - b->current_flags)) - continue; - - memset(&f, 0, sizeof(f)); - bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr); - f.flt_type = HCI_FLT_CONN_SETUP; - f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR; - f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON; - - bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); - hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f); - scan = SCAN_PAGE; - } - - if (scan && !scanning) { - set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); - hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); - } else if (!scan && scanning) { - set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); - hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); - } -} - static void cancel_adv_timeout(struct hci_dev *hdev) { if (hdev->adv_instance_timeout) { @@ -1088,185 +879,6 @@ int hci_req_resume_adv_instances(struct hci_dev *hdev) return hci_req_run(&req, NULL); } -static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode) -{ - bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode, - status); - if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) || - test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) { - clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); - clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); - wake_up(&hdev->suspend_wait_q); - } - - if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) { - clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); - wake_up(&hdev->suspend_wait_q); - } -} - -static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req, - bool suspending) -{ - struct hci_dev *hdev = req->hdev; - - switch (hci_get_adv_monitor_offload_ext(hdev)) { - case HCI_ADV_MONITOR_EXT_MSFT: - if (suspending) - msft_suspend(hdev); - else - msft_resume(hdev); - break; - default: - return; - } - - /* No need to block when enabling since it's on resume path */ - if (hdev->suspended && suspending) - set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); -} - -/* Call with hci_dev_lock */ -void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) -{ - int old_state; - struct hci_conn *conn; - struct hci_request req; - u8 page_scan; - int disconnect_counter; - - if (next == hdev->suspend_state) { - bt_dev_dbg(hdev, "Same state before and after: %d", next); - goto done; - } - - hdev->suspend_state = next; - hci_req_init(&req, hdev); - - if (next == BT_SUSPEND_DISCONNECT) { - /* Mark device as suspended */ - hdev->suspended = true; - - /* Pause discovery if not already stopped */ - old_state = hdev->discovery.state; - if (old_state != DISCOVERY_STOPPED) { - set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks); - hci_discovery_set_state(hdev, DISCOVERY_STOPPING); - queue_work(hdev->req_workqueue, &hdev->discov_update); - } - - hdev->discovery_paused = true; - hdev->discovery_old_state = old_state; - - /* Stop directed advertising */ - old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); - if (old_state) { - set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks); - cancel_delayed_work(&hdev->discov_off); - queue_delayed_work(hdev->req_workqueue, - &hdev->discov_off, 0); - } - - /* Pause other advertisements */ - if (hdev->adv_instance_cnt) - __hci_req_pause_adv_instances(&req); - - hdev->advertising_paused = true; - hdev->advertising_old_state = old_state; - - /* Disable page scan if enabled */ - if (test_bit(HCI_PSCAN, &hdev->flags)) { - page_scan = SCAN_DISABLED; - hci_req_add(&req, 
HCI_OP_WRITE_SCAN_ENABLE, 1, - &page_scan); - set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); - } - - /* Disable LE passive scan if enabled */ - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { - cancel_interleave_scan(hdev); - hci_req_add_le_scan_disable(&req, false); - } - - /* Disable advertisement filters */ - hci_req_prepare_adv_monitor_suspend(&req, true); - - /* Prevent disconnects from causing scanning to be re-enabled */ - hdev->scanning_paused = true; - - /* Run commands before disconnecting */ - hci_req_run(&req, suspend_req_complete); - - disconnect_counter = 0; - /* Soft disconnect everything (power off) */ - list_for_each_entry(conn, &hdev->conn_hash.list, list) { - hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF); - disconnect_counter++; - } - - if (disconnect_counter > 0) { - bt_dev_dbg(hdev, - "Had %d disconnects. Will wait on them", - disconnect_counter); - set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks); - } - } else if (next == BT_SUSPEND_CONFIGURE_WAKE) { - /* Unpause to take care of updating scanning params */ - hdev->scanning_paused = false; - /* Enable event filter for paired devices */ - hci_req_set_event_filter(&req); - /* Enable passive scan at lower duty cycle */ - __hci_update_background_scan(&req); - /* Pause scan changes again. */ - hdev->scanning_paused = true; - hci_req_run(&req, suspend_req_complete); - } else { - hdev->suspended = false; - hdev->scanning_paused = false; - - /* Clear any event filters and restore scan state */ - hci_req_clear_event_filter(&req); - __hci_req_update_scan(&req); - - /* Reset passive/background scanning to normal */ - __hci_update_background_scan(&req); - /* Enable all of the advertisement filters */ - hci_req_prepare_adv_monitor_suspend(&req, false); - - /* Unpause directed advertising */ - hdev->advertising_paused = false; - if (hdev->advertising_old_state) { - set_bit(SUSPEND_UNPAUSE_ADVERTISING, - hdev->suspend_tasks); - hci_dev_set_flag(hdev, HCI_ADVERTISING); - queue_work(hdev->req_workqueue, - &hdev->discoverable_update); - hdev->advertising_old_state = 0; - } - - /* Resume other advertisements */ - if (hdev->adv_instance_cnt) - __hci_req_resume_adv_instances(&req); - - /* Unpause discovery */ - hdev->discovery_paused = false; - if (hdev->discovery_old_state != DISCOVERY_STOPPED && - hdev->discovery_old_state != DISCOVERY_STOPPING) { - set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks); - hci_discovery_set_state(hdev, DISCOVERY_STARTING); - queue_work(hdev->req_workqueue, &hdev->discov_update); - } - - hci_req_run(&req, suspend_req_complete); - } - - hdev->suspend_state = next; - -done: - clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); - wake_up(&hdev->suspend_wait_q); -} - static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) { return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance); @@ -1548,8 +1160,7 @@ void hci_req_disable_address_resolution(struct hci_dev *hdev) struct hci_request req; __u8 enable = 0x00; - if (!use_ll_privacy(hdev) && - !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) + if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) return; hci_req_init(&req, hdev); @@ -1692,8 +1303,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, /* If Controller supports LL Privacy use own address type is * 0x03 */ - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; @@ -1871,7 +1481,8 @@ int 
__hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); - if (own_addr_type == ADDR_LE_DEV_RANDOM && + if ((own_addr_type == ADDR_LE_DEV_RANDOM || + own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) && bacmp(&random_addr, BDADDR_ANY)) { struct hci_cp_le_set_adv_set_rand_addr cp; @@ -2160,8 +1771,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy, /* If Controller supports LL Privacy use own address type is * 0x03 */ - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; @@ -2301,47 +1911,6 @@ static void scan_update_work(struct work_struct *work) hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL); } -static int connectable_update(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - hci_dev_lock(hdev); - - __hci_req_update_scan(req); - - /* If BR/EDR is not enabled and we disable advertising as a - * by-product of disabling connectable, we need to update the - * advertising flags. - */ - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) - __hci_req_update_adv_data(req, hdev->cur_adv_instance); - - /* Update the advertising parameters if necessary */ - if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || - !list_empty(&hdev->adv_instances)) { - if (ext_adv_capable(hdev)) - __hci_req_start_ext_adv(req, hdev->cur_adv_instance); - else - __hci_req_enable_advertising(req); - } - - __hci_update_background_scan(req); - - hci_dev_unlock(hdev); - - return 0; -} - -static void connectable_update_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - connectable_update); - u8 status; - - hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status); - mgmt_set_connectable_complete(hdev, status); -} - static u8 get_service_classes(struct hci_dev *hdev) { struct bt_uuid *uuid; @@ -2445,16 +2014,6 @@ static int discoverable_update(struct hci_request *req, unsigned long opt) return 0; } -static void discoverable_update_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - discoverable_update); - u8 status; - - hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status); - mgmt_set_discoverable_complete(hdev, status); -} - void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, u8 reason) { @@ -2548,35 +2107,6 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason) return 0; } -static int update_bg_scan(struct hci_request *req, unsigned long opt) -{ - hci_dev_lock(req->hdev); - __hci_update_background_scan(req); - hci_dev_unlock(req->hdev); - return 0; -} - -static void bg_scan_update(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - bg_scan_update); - struct hci_conn *conn; - u8 status; - int err; - - err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status); - if (!err) - return; - - hci_dev_lock(hdev); - - conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); - if (conn) - hci_le_conn_failed(conn, status); - - hci_dev_unlock(hdev); -} - static int le_scan_disable(struct hci_request *req, unsigned long opt) { hci_req_add_le_scan_disable(req, false); @@ -3163,10 +2693,7 @@ int __hci_req_hci_power_on(struct hci_dev *hdev) void hci_request_setup(struct hci_dev *hdev) { INIT_WORK(&hdev->discov_update, discov_update); - INIT_WORK(&hdev->bg_scan_update, 
bg_scan_update); INIT_WORK(&hdev->scan_update, scan_update_work); - INIT_WORK(&hdev->connectable_update, connectable_update_work); - INIT_WORK(&hdev->discoverable_update, discoverable_update_work); INIT_DELAYED_WORK(&hdev->discov_off, discov_off); INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); @@ -3179,10 +2706,7 @@ void hci_request_cancel_all(struct hci_dev *hdev) hci_req_sync_cancel(hdev, ENODEV); cancel_work_sync(&hdev->discov_update); - cancel_work_sync(&hdev->bg_scan_update); cancel_work_sync(&hdev->scan_update); - cancel_work_sync(&hdev->connectable_update); - cancel_work_sync(&hdev->discoverable_update); cancel_delayed_work_sync(&hdev->discov_off); cancel_delayed_work_sync(&hdev->le_scan_disable); cancel_delayed_work_sync(&hdev->le_scan_restart); diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index f31420f58525..5f8e8846ec74 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -22,9 +22,17 @@ #include <asm/unaligned.h> +#define HCI_REQ_DONE 0 +#define HCI_REQ_PEND 1 +#define HCI_REQ_CANCELED 2 + #define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock) #define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock) +#define HCI_REQ_DONE 0 +#define HCI_REQ_PEND 1 +#define HCI_REQ_CANCELED 2 + struct hci_request { struct hci_dev *hdev; struct sk_buff_head cmd_q; @@ -40,6 +48,8 @@ void hci_req_purge(struct hci_request *req); bool hci_req_status_pend(struct hci_dev *hdev); int hci_req_run(struct hci_request *req, hci_req_complete_t complete); int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete); +void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, + struct sk_buff *skb); void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, const void *param); void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, @@ -117,10 +127,5 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason); void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, u8 reason); -static inline void hci_update_background_scan(struct hci_dev *hdev) -{ - queue_work(hdev->req_workqueue, &hdev->bg_scan_update); -} - void hci_request_setup(struct hci_dev *hdev); void hci_request_cancel_all(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index d0dad1fafe07..446573a12571 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -889,10 +889,6 @@ static int hci_sock_release(struct socket *sock) } sock_orphan(sk); - - skb_queue_purge(&sk->sk_receive_queue); - skb_queue_purge(&sk->sk_write_queue); - release_sock(sk); sock_put(sk); return 0; @@ -2058,6 +2054,12 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, return err; } +static void hci_sock_destruct(struct sock *sk) +{ + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + static const struct proto_ops hci_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, @@ -2111,6 +2113,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol, sock->state = SS_UNCONNECTED; sk->sk_state = BT_OPEN; + sk->sk_destruct = hci_sock_destruct; bt_sock_link(&hci_sk_list, sk); return 0; diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c new file mode 100644 index 000000000000..ad86caf41f91 --- /dev/null +++ b/net/bluetooth/hci_sync.c @@ -0,0 +1,4922 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BlueZ - Bluetooth protocol stack for Linux 
+ * + * Copyright (C) 2021 Intel Corporation + */ + +#include <linux/property.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/mgmt.h> + +#include "hci_request.h" +#include "hci_debugfs.h" +#include "smp.h" +#include "eir.h" +#include "msft.h" +#include "aosp.h" +#include "leds.h" + +static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, + struct sk_buff *skb) +{ + bt_dev_dbg(hdev, "result 0x%2.2x", result); + + if (hdev->req_status != HCI_REQ_PEND) + return; + + hdev->req_result = result; + hdev->req_status = HCI_REQ_DONE; + + if (skb) { + struct sock *sk = hci_skb_sk(skb); + + /* Drop sk reference if set */ + if (sk) + sock_put(sk); + + hdev->req_skb = skb_get(skb); + } + + wake_up_interruptible(&hdev->req_wait_q); +} + +static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, + u32 plen, const void *param, + struct sock *sk) +{ + int len = HCI_COMMAND_HDR_SIZE + plen; + struct hci_command_hdr *hdr; + struct sk_buff *skb; + + skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!skb) + return NULL; + + hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); + hdr->opcode = cpu_to_le16(opcode); + hdr->plen = plen; + + if (plen) + skb_put_data(skb, param, plen); + + bt_dev_dbg(hdev, "skb len %d", skb->len); + + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + hci_skb_opcode(skb) = opcode; + + /* Grab a reference if command needs to be associated with a sock (e.g. + * likely mgmt socket that initiated the command). + */ + if (sk) { + hci_skb_sk(skb) = sk; + sock_hold(sk); + } + + return skb; +} + +static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, + const void *param, u8 event, struct sock *sk) +{ + struct hci_dev *hdev = req->hdev; + struct sk_buff *skb; + + bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); + + /* If an error occurred during request building, there is no point in + * queueing the HCI command. We can simply return. + */ + if (req->err) + return; + + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk); + if (!skb) { + bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", + opcode); + req->err = -ENOMEM; + return; + } + + if (skb_queue_empty(&req->cmd_q)) + bt_cb(skb)->hci.req_flags |= HCI_REQ_START; + + bt_cb(skb)->hci.req_event = event; + + skb_queue_tail(&req->cmd_q, skb); +} + +static int hci_cmd_sync_run(struct hci_request *req) +{ + struct hci_dev *hdev = req->hdev; + struct sk_buff *skb; + unsigned long flags; + + bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); + + /* If an error occurred during request building, remove all HCI + * commands queued on the HCI request queue. + */ + if (req->err) { + skb_queue_purge(&req->cmd_q); + return req->err; + } + + /* Do not allow empty requests */ + if (skb_queue_empty(&req->cmd_q)) + return -ENODATA; + + skb = skb_peek_tail(&req->cmd_q); + bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete; + bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; + + spin_lock_irqsave(&hdev->cmd_q.lock, flags); + skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); + spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); + + queue_work(hdev->workqueue, &hdev->cmd_work); + + return 0; +} + +/* This function requires the caller holds hdev->req_lock. 
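hci_cmd_sync_alloc() above reserves HCI_COMMAND_HDR_SIZE bytes, stores the opcode in little-endian order plus a one-byte parameter length, and appends the parameters. The same layout as a standalone user-space helper, for illustration only:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Pack an HCI command: 16-bit little-endian opcode, 1-byte parameter
 * length, then the parameters themselves. Returns the packet length.
 */
static size_t pack_hci_cmd(uint8_t *buf, uint16_t opcode,
                           const void *param, uint8_t plen)
{
        buf[0] = opcode & 0xff;         /* little-endian opcode */
        buf[1] = opcode >> 8;
        buf[2] = plen;                  /* parameter length */
        if (plen)
                memcpy(buf + 3, param, plen);
        return 3 + (size_t)plen;
}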
*/ +struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u8 event, u32 timeout, + struct sock *sk) +{ + struct hci_request req; + struct sk_buff *skb; + int err = 0; + + bt_dev_dbg(hdev, "Opcode 0x%4x", opcode); + + hci_req_init(&req, hdev); + + hci_cmd_sync_add(&req, opcode, plen, param, event, sk); + + hdev->req_status = HCI_REQ_PEND; + + err = hci_cmd_sync_run(&req); + if (err < 0) + return ERR_PTR(err); + + err = wait_event_interruptible_timeout(hdev->req_wait_q, + hdev->req_status != HCI_REQ_PEND, + timeout); + + if (err == -ERESTARTSYS) + return ERR_PTR(-EINTR); + + switch (hdev->req_status) { + case HCI_REQ_DONE: + err = -bt_to_errno(hdev->req_result); + break; + + case HCI_REQ_CANCELED: + err = -hdev->req_result; + break; + + default: + err = -ETIMEDOUT; + break; + } + + hdev->req_status = 0; + hdev->req_result = 0; + skb = hdev->req_skb; + hdev->req_skb = NULL; + + bt_dev_dbg(hdev, "end: err %d", err); + + if (err < 0) { + kfree_skb(skb); + return ERR_PTR(err); + } + + return skb; +} +EXPORT_SYMBOL(__hci_cmd_sync_sk); + +/* This function requires the caller holds hdev->req_lock. */ +struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL); +} +EXPORT_SYMBOL(__hci_cmd_sync); + +/* Send HCI command and wait for command complete event */ +struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + struct sk_buff *skb; + + if (!test_bit(HCI_UP, &hdev->flags)) + return ERR_PTR(-ENETDOWN); + + bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); + + hci_req_sync_lock(hdev); + skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); + hci_req_sync_unlock(hdev); + + return skb; +} +EXPORT_SYMBOL(hci_cmd_sync); + +/* This function requires the caller holds hdev->req_lock. */ +struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u8 event, u32 timeout) +{ + return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, + NULL); +} +EXPORT_SYMBOL(__hci_cmd_sync_ev); + +/* This function requires the caller holds hdev->req_lock. */ +int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u8 event, u32 timeout, + struct sock *sk) +{ + struct sk_buff *skb; + u8 status; + + skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode, + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + /* If command return a status event skb will be set to NULL as there are + * no parameters, in case of failure IS_ERR(skb) would have be set to + * the actual error would be found with PTR_ERR(skb). 
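A little further down in this file, hci_cmd_sync_queue() appends a (func, data, destroy) entry to cmd_sync_work_list under cmd_sync_work_lock, and hci_cmd_sync_work() pops one entry at a time and runs it with the request lock held, so queued operations never overlap. A user-space analogue of that pattern, not part of the patch:

#include <pthread.h>
#include <stdlib.h>

struct work_entry {
        int (*func)(void *data);
        void (*destroy)(void *data, int err);
        void *data;
        struct work_entry *next;
};

static struct work_entry *work_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t run_lock = PTHREAD_MUTEX_INITIALIZER;

static int queue_work_entry(int (*func)(void *), void *data,
                            void (*destroy)(void *, int))
{
        struct work_entry *e = malloc(sizeof(*e));

        if (!e)
                return -1;
        e->func = func;
        e->data = data;
        e->destroy = destroy;

        pthread_mutex_lock(&list_lock);
        e->next = work_list;            /* LIFO for brevity; the real list is FIFO */
        work_list = e;
        pthread_mutex_unlock(&list_lock);
        return 0;
}

static void run_one_entry(void)
{
        struct work_entry *e;
        int err;

        pthread_mutex_lock(&list_lock);
        e = work_list;
        if (e)
                work_list = e->next;
        pthread_mutex_unlock(&list_lock);

        if (!e)
                return;

        pthread_mutex_lock(&run_lock);  /* plays the role of the request lock */
        err = e->func(e->data);
        if (e->destroy)
                e->destroy(e->data, err);
        pthread_mutex_unlock(&run_lock);
        free(e);
}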
+ */ + if (!skb) + return 0; + + status = skb->data[0]; + + kfree_skb(skb); + + return status; +} +EXPORT_SYMBOL(__hci_cmd_sync_status_sk); + +int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout, + NULL); +} +EXPORT_SYMBOL(__hci_cmd_sync_status); + +static void hci_cmd_sync_work(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work); + struct hci_cmd_sync_work_entry *entry; + hci_cmd_sync_work_func_t func; + hci_cmd_sync_work_destroy_t destroy; + void *data; + + bt_dev_dbg(hdev, ""); + + mutex_lock(&hdev->cmd_sync_work_lock); + entry = list_first_entry(&hdev->cmd_sync_work_list, + struct hci_cmd_sync_work_entry, list); + if (entry) { + list_del(&entry->list); + func = entry->func; + data = entry->data; + destroy = entry->destroy; + kfree(entry); + } else { + func = NULL; + data = NULL; + destroy = NULL; + } + mutex_unlock(&hdev->cmd_sync_work_lock); + + if (func) { + int err; + + hci_req_sync_lock(hdev); + + err = func(hdev, data); + + if (destroy) + destroy(hdev, data, err); + + hci_req_sync_unlock(hdev); + } +} + +void hci_cmd_sync_init(struct hci_dev *hdev) +{ + INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); + INIT_LIST_HEAD(&hdev->cmd_sync_work_list); + mutex_init(&hdev->cmd_sync_work_lock); +} + +void hci_cmd_sync_clear(struct hci_dev *hdev) +{ + struct hci_cmd_sync_work_entry *entry, *tmp; + + cancel_work_sync(&hdev->cmd_sync_work); + + list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { + if (entry->destroy) + entry->destroy(hdev, entry->data, -ECANCELED); + + list_del(&entry->list); + kfree(entry); + } +} + +int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->func = func; + entry->data = data; + entry->destroy = destroy; + + mutex_lock(&hdev->cmd_sync_work_lock); + list_add_tail(&entry->list, &hdev->cmd_sync_work_list); + mutex_unlock(&hdev->cmd_sync_work_lock); + + queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); + + return 0; +} +EXPORT_SYMBOL(hci_cmd_sync_queue); + +int hci_update_eir_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_eir cp; + + bt_dev_dbg(hdev, ""); + + if (!hdev_is_powered(hdev)) + return 0; + + if (!lmp_ext_inq_capable(hdev)) + return 0; + + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return 0; + + if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + eir_create(hdev, cp.data); + + if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) + return 0; + + memcpy(hdev->eir, cp.data, sizeof(cp.data)); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +static u8 get_service_classes(struct hci_dev *hdev) +{ + struct bt_uuid *uuid; + u8 val = 0; + + list_for_each_entry(uuid, &hdev->uuids, list) + val |= uuid->svc_hint; + + return val; +} + +int hci_update_class_sync(struct hci_dev *hdev) +{ + u8 cod[3]; + + bt_dev_dbg(hdev, ""); + + if (!hdev_is_powered(hdev)) + return 0; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) + return 0; + + cod[0] = hdev->minor_class; + cod[1] = hdev->major_class; + cod[2] = get_service_classes(hdev); + + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) + cod[1] 
|= 0x20; + + if (memcmp(cod, hdev->dev_class, 3) == 0) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV, + sizeof(cod), cod, HCI_CMD_TIMEOUT); +} + +static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) +{ + /* If there is no connection we are OK to advertise. */ + if (hci_conn_num(hdev, LE_LINK) == 0) + return true; + + /* Check le_states if there is any connection in peripheral role. */ + if (hdev->conn_hash.le_num_peripheral > 0) { + /* Peripheral connection state and non connectable mode + * bit 20. + */ + if (!connectable && !(hdev->le_states[2] & 0x10)) + return false; + + /* Peripheral connection state and connectable mode bit 38 + * and scannable bit 21. + */ + if (connectable && (!(hdev->le_states[4] & 0x40) || + !(hdev->le_states[2] & 0x20))) + return false; + } + + /* Check le_states if there is any connection in central role. */ + if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { + /* Central connection state and non connectable mode bit 18. */ + if (!connectable && !(hdev->le_states[2] & 0x02)) + return false; + + /* Central connection state and connectable mode bit 35 and + * scannable 19. + */ + if (connectable && (!(hdev->le_states[4] & 0x08) || + !(hdev->le_states[2] & 0x08))) + return false; + } + + return true; +} + +static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) +{ + /* If privacy is not enabled don't use RPA */ + if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) + return false; + + /* If basic privacy mode is enabled use RPA */ + if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) + return true; + + /* If limited privacy mode is enabled don't use RPA if we're + * both discoverable and bondable. + */ + if ((flags & MGMT_ADV_FLAG_DISCOV) && + hci_dev_test_flag(hdev, HCI_BONDABLE)) + return false; + + /* We're neither bondable nor discoverable in the limited + * privacy mode, therefore use RPA. + */ + return true; +} + +static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) +{ + /* If we're advertising or initiating an LE connection we can't + * go ahead and change the random address at this time. This is + * because the eventual initiator address used for the + * subsequently created connection will be undefined (some + * controllers use the new address and others the one we had + * when the operation started). + * + * In this kind of scenario skip the update and let the random + * address be updated at the next cycle. + */ + if (hci_dev_test_flag(hdev, HCI_LE_ADV) || + hci_lookup_le_connect(hdev)) { + bt_dev_dbg(hdev, "Deferring random address update"); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + return 0; + } + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR, + 6, rpa, HCI_CMD_TIMEOUT); +} + +int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy, + bool rpa, u8 *own_addr_type) +{ + int err; + + /* If privacy is enabled use a resolvable private address. If + * current RPA has expired or there is something else than + * the current RPA in use, then generate a new one. 
+ */ + if (rpa) { + /* If Controller supports LL Privacy use own address type is + * 0x03 + */ + if (use_ll_privacy(hdev)) + *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; + else + *own_addr_type = ADDR_LE_DEV_RANDOM; + + /* Check if RPA is valid */ + if (rpa_valid(hdev)) + return 0; + + err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); + if (err < 0) { + bt_dev_err(hdev, "failed to generate new RPA"); + return err; + } + + err = hci_set_random_addr_sync(hdev, &hdev->rpa); + if (err) + return err; + + return 0; + } + + /* In case of required privacy without resolvable private address, + * use an non-resolvable private address. This is useful for active + * scanning and non-connectable advertising. + */ + if (require_privacy) { + bdaddr_t nrpa; + + while (true) { + /* The non-resolvable private address is generated + * from random six bytes with the two most significant + * bits cleared. + */ + get_random_bytes(&nrpa, 6); + nrpa.b[5] &= 0x3f; + + /* The non-resolvable private address shall not be + * equal to the public address. + */ + if (bacmp(&hdev->bdaddr, &nrpa)) + break; + } + + *own_addr_type = ADDR_LE_DEV_RANDOM; + + return hci_set_random_addr_sync(hdev, &nrpa); + } + + /* If forcing static address is in use or there is no public + * address use the static address as random address (but skip + * the HCI command if the current random address is already the + * static one. + * + * In case BR/EDR has been disabled on a dual-mode controller + * and a static address has been configured, then use that + * address instead of the public BR/EDR address. + */ + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || + !bacmp(&hdev->bdaddr, BDADDR_ANY) || + (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && + bacmp(&hdev->static_addr, BDADDR_ANY))) { + *own_addr_type = ADDR_LE_DEV_RANDOM; + if (bacmp(&hdev->static_addr, &hdev->random_addr)) + return hci_set_random_addr_sync(hdev, + &hdev->static_addr); + return 0; + } + + /* Neither privacy nor static address is being used so use a + * public address. + */ + *own_addr_type = ADDR_LE_DEV_PUBLIC; + + return 0; +} + +static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_ext_adv_enable *cp; + struct hci_cp_ext_adv_set *set; + u8 data[sizeof(*cp) + sizeof(*set) * 1]; + u8 size; + + /* If request specifies an instance that doesn't exist, fail */ + if (instance > 0) { + struct adv_info *adv; + + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -EINVAL; + + /* If not enabled there is nothing to do */ + if (!adv->enabled) + return 0; + } + + memset(data, 0, sizeof(data)); + + cp = (void *)data; + set = (void *)cp->data; + + /* Instance 0x00 indicates all advertising instances will be disabled */ + cp->num_of_sets = !!instance; + cp->enable = 0x00; + + set->handle = instance; + + size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, + size, data, HCI_CMD_TIMEOUT); +} + +static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance, + bdaddr_t *random_addr) +{ + struct hci_cp_le_set_adv_set_rand_addr cp; + int err; + + if (!instance) { + /* Instance 0x00 doesn't have an adv_info, instead it uses + * hdev->random_addr to track its address so whenever it needs + * to be updated this also set the random address since + * hdev->random_addr is shared with scan state machine. 
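hci_update_random_address_sync() above generates a non-resolvable private address from six random bytes, clears the two most significant bits of the top byte and retries if the result equals the public address. A small user-space sketch of that generation step, with rand() standing in for get_random_bytes():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { uint8_t b[6]; } bdaddr;

static void make_nrpa(bdaddr *nrpa, const bdaddr *public_addr)
{
        do {
                for (int i = 0; i < 6; i++)
                        nrpa->b[i] = rand() & 0xff;

                nrpa->b[5] &= 0x3f;     /* clear the two most significant bits */

                /* retry if it collides with the public address */
        } while (memcmp(nrpa, public_addr, sizeof(*nrpa)) == 0);
}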
+ */ + err = hci_set_random_addr_sync(hdev, random_addr); + if (err) + return err; + } + + memset(&cp, 0, sizeof(cp)); + + cp.handle = instance; + bacpy(&cp.bdaddr, random_addr); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_ext_adv_params cp; + bool connectable; + u32 flags; + bdaddr_t random_addr; + u8 own_addr_type; + int err; + struct adv_info *adv; + bool secondary_adv; + + if (instance > 0) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -EINVAL; + } else { + adv = NULL; + } + + /* Updating parameters of an active instance will return a + * Command Disallowed error, so we must first disable the + * instance if it is active. + */ + if (adv && !adv->pending) { + err = hci_disable_ext_adv_instance_sync(hdev, instance); + if (err) + return err; + } + + flags = hci_adv_instance_flags(hdev, instance); + + /* If the "connectable" instance flag was not set, then choose between + * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. + */ + connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || + mgmt_get_connectable(hdev); + + if (!is_advertising_allowed(hdev, connectable)) + return -EPERM; + + /* Set require_privacy to true only when non-connectable + * advertising is used. In that case it is fine to use a + * non-resolvable private address. + */ + err = hci_get_random_address(hdev, !connectable, + adv_use_rpa(hdev, flags), adv, + &own_addr_type, &random_addr); + if (err < 0) + return err; + + memset(&cp, 0, sizeof(cp)); + + if (adv) { + hci_cpu_to_le24(adv->min_interval, cp.min_interval); + hci_cpu_to_le24(adv->max_interval, cp.max_interval); + cp.tx_power = adv->tx_power; + } else { + hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); + hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); + cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; + } + + secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); + + if (connectable) { + if (secondary_adv) + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); + else + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); + } else if (hci_adv_instance_is_scannable(hdev, instance) || + (flags & MGMT_ADV_PARAM_SCAN_RSP)) { + if (secondary_adv) + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); + else + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); + } else { + if (secondary_adv) + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); + else + cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); + } + + /* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter + * contains the peer’s Identity Address and the Peer_Address_Type + * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01). + * These parameters are used to locate the corresponding local IRK in + * the resolving list; this IRK is used to generate their own address + * used in the advertisement. 
+ */ + if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) + hci_copy_identity_address(hdev, &cp.peer_addr, + &cp.peer_addr_type); + + cp.own_addr_type = own_addr_type; + cp.channel_map = hdev->le_adv_channel_map; + cp.handle = instance; + + if (flags & MGMT_ADV_FLAG_SEC_2M) { + cp.primary_phy = HCI_ADV_PHY_1M; + cp.secondary_phy = HCI_ADV_PHY_2M; + } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { + cp.primary_phy = HCI_ADV_PHY_CODED; + cp.secondary_phy = HCI_ADV_PHY_CODED; + } else { + /* In all other cases use 1M */ + cp.primary_phy = HCI_ADV_PHY_1M; + cp.secondary_phy = HCI_ADV_PHY_1M; + } + + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (err) + return err; + + if ((own_addr_type == ADDR_LE_DEV_RANDOM || + own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) && + bacmp(&random_addr, BDADDR_ANY)) { + /* Check if random address need to be updated */ + if (adv) { + if (!bacmp(&random_addr, &adv->random_addr)) + return 0; + } else { + if (!bacmp(&random_addr, &hdev->random_addr)) + return 0; + } + + return hci_set_adv_set_random_addr_sync(hdev, instance, + &random_addr); + } + + return 0; +} + +static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct { + struct hci_cp_le_set_ext_scan_rsp_data cp; + u8 data[HCI_MAX_EXT_AD_LENGTH]; + } pdu; + u8 len; + + memset(&pdu, 0, sizeof(pdu)); + + len = eir_create_scan_rsp(hdev, instance, pdu.data); + + if (hdev->scan_rsp_data_len == len && + !memcmp(pdu.data, hdev->scan_rsp_data, len)) + return 0; + + memcpy(hdev->scan_rsp_data, pdu.data, len); + hdev->scan_rsp_data_len = len; + + pdu.cp.handle = instance; + pdu.cp.length = len; + pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; + pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, + sizeof(pdu.cp) + len, &pdu.cp, + HCI_CMD_TIMEOUT); +} + +static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_scan_rsp_data cp; + u8 len; + + memset(&cp, 0, sizeof(cp)); + + len = eir_create_scan_rsp(hdev, instance, cp.data); + + if (hdev->scan_rsp_data_len == len && + !memcmp(cp.data, hdev->scan_rsp_data, len)) + return 0; + + memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); + hdev->scan_rsp_data_len = len; + + cp.length = len; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) +{ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + if (ext_adv_capable(hdev)) + return hci_set_ext_scan_rsp_data_sync(hdev, instance); + + return __hci_set_scan_rsp_data_sync(hdev, instance); +} + +int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_ext_adv_enable *cp; + struct hci_cp_ext_adv_set *set; + u8 data[sizeof(*cp) + sizeof(*set) * 1]; + struct adv_info *adv; + + if (instance > 0) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -EINVAL; + /* If already enabled there is nothing to do */ + if (adv->enabled) + return 0; + } else { + adv = NULL; + } + + cp = (void *)data; + set = (void *)cp->data; + + memset(cp, 0, sizeof(*cp)); + + cp->enable = 0x01; + cp->num_of_sets = 0x01; + + memset(set, 0, sizeof(*set)); + + set->handle = instance; + + /* Set duration per instance since controller is responsible for + * scheduling it. 
+ */ + if (adv && adv->timeout) { + u16 duration = adv->timeout * MSEC_PER_SEC; + + /* Time = N * 10 ms */ + set->duration = cpu_to_le16(duration / 10); + } + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, + sizeof(*cp) + + sizeof(*set) * cp->num_of_sets, + data, HCI_CMD_TIMEOUT); +} + +int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance) +{ + int err; + + err = hci_setup_ext_adv_instance_sync(hdev, instance); + if (err) + return err; + + err = hci_set_ext_scan_rsp_data_sync(hdev, instance); + if (err) + return err; + + return hci_enable_ext_advertising_sync(hdev, instance); +} + +static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance) +{ + int err; + + if (ext_adv_capable(hdev)) + return hci_start_ext_adv_sync(hdev, instance); + + err = hci_update_adv_data_sync(hdev, instance); + if (err) + return err; + + err = hci_update_scan_rsp_data_sync(hdev, instance); + if (err) + return err; + + return hci_enable_advertising_sync(hdev); +} + +int hci_enable_advertising_sync(struct hci_dev *hdev) +{ + struct adv_info *adv_instance; + struct hci_cp_le_set_adv_param cp; + u8 own_addr_type, enable = 0x01; + bool connectable; + u16 adv_min_interval, adv_max_interval; + u32 flags; + u8 status; + + if (ext_adv_capable(hdev)) + return hci_enable_ext_advertising_sync(hdev, + hdev->cur_adv_instance); + + flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); + adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); + + /* If the "connectable" instance flag was not set, then choose between + * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. + */ + connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || + mgmt_get_connectable(hdev); + + if (!is_advertising_allowed(hdev, connectable)) + return -EINVAL; + + status = hci_disable_advertising_sync(hdev); + if (status) + return status; + + /* Clear the HCI_LE_ADV bit temporarily so that the + * hci_update_random_address knows that it's safe to go ahead + * and write a new random address. The flag will be set back on + * as soon as the SET_ADV_ENABLE HCI command completes. + */ + hci_dev_clear_flag(hdev, HCI_LE_ADV); + + /* Set require_privacy to true only when non-connectable + * advertising is used. In that case it is fine to use a + * non-resolvable private address. 
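hci_enable_ext_advertising_sync() above converts the per-instance timeout from seconds into the controller's Duration field, which counts 10 ms slices (Time = N * 10 ms). The conversion in isolation, as an illustrative helper:

#include <stdint.h>

/* timeout is kept in seconds; the controller expects N where
 * Time = N * 10 ms.
 */
static uint16_t adv_duration_units(uint16_t timeout_sec)
{
        uint32_t duration_ms = (uint32_t)timeout_sec * 1000;

        return (uint16_t)(duration_ms / 10);
}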
+ */ + status = hci_update_random_address_sync(hdev, !connectable, + adv_use_rpa(hdev, flags), + &own_addr_type); + if (status) + return status; + + memset(&cp, 0, sizeof(cp)); + + if (adv_instance) { + adv_min_interval = adv_instance->min_interval; + adv_max_interval = adv_instance->max_interval; + } else { + adv_min_interval = hdev->le_adv_min_interval; + adv_max_interval = hdev->le_adv_max_interval; + } + + if (connectable) { + cp.type = LE_ADV_IND; + } else { + if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance)) + cp.type = LE_ADV_SCAN_IND; + else + cp.type = LE_ADV_NONCONN_IND; + + if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || + hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { + adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; + adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; + } + } + + cp.min_interval = cpu_to_le16(adv_min_interval); + cp.max_interval = cpu_to_le16(adv_max_interval); + cp.own_address_type = own_addr_type; + cp.channel_map = hdev->le_adv_channel_map; + + status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (status) + return status; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, + sizeof(enable), &enable, HCI_CMD_TIMEOUT); +} + +static int enable_advertising_sync(struct hci_dev *hdev, void *data) +{ + return hci_enable_advertising_sync(hdev); +} + +int hci_enable_advertising(struct hci_dev *hdev) +{ + if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && + list_empty(&hdev->adv_instances)) + return 0; + + return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL); +} + +int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, + struct sock *sk) +{ + int err; + + if (!ext_adv_capable(hdev)) + return 0; + + err = hci_disable_ext_adv_instance_sync(hdev, instance); + if (err) + return err; + + /* If request specifies an instance that doesn't exist, fail */ + if (instance > 0 && !hci_find_adv_instance(hdev, instance)) + return -EINVAL; + + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET, + sizeof(instance), &instance, 0, + HCI_CMD_TIMEOUT, sk); +} + +static void cancel_adv_timeout(struct hci_dev *hdev) +{ + if (hdev->adv_instance_timeout) { + hdev->adv_instance_timeout = 0; + cancel_delayed_work(&hdev->adv_instance_expire); + } +} + +static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct { + struct hci_cp_le_set_ext_adv_data cp; + u8 data[HCI_MAX_EXT_AD_LENGTH]; + } pdu; + u8 len; + + memset(&pdu, 0, sizeof(pdu)); + + len = eir_create_adv_data(hdev, instance, pdu.data); + + /* There's nothing to do if the data hasn't changed */ + if (hdev->adv_data_len == len && + memcmp(pdu.data, hdev->adv_data, len) == 0) + return 0; + + memcpy(hdev->adv_data, pdu.data, len); + hdev->adv_data_len = len; + + pdu.cp.length = len; + pdu.cp.handle = instance; + pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; + pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, + sizeof(pdu.cp) + len, &pdu.cp, + HCI_CMD_TIMEOUT); +} + +static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_adv_data cp; + u8 len; + + memset(&cp, 0, sizeof(cp)); + + len = eir_create_adv_data(hdev, instance, cp.data); + + /* There's nothing to do if the data hasn't changed */ + if (hdev->adv_data_len == len && + memcmp(cp.data, hdev->adv_data, len) == 0) + return 0; + + memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); + hdev->adv_data_len = len; + + cp.length = len; + + return 
__hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + if (ext_adv_capable(hdev)) + return hci_set_ext_adv_data_sync(hdev, instance); + + return hci_set_adv_data_sync(hdev, instance); +} + +int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, + bool force) +{ + struct adv_info *adv = NULL; + u16 timeout; + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev)) + return -EPERM; + + if (hdev->adv_instance_timeout) + return -EBUSY; + + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -ENOENT; + + /* A zero timeout means unlimited advertising. As long as there is + * only one instance, duration should be ignored. We still set a timeout + * in case further instances are being added later on. + * + * If the remaining lifetime of the instance is more than the duration + * then the timeout corresponds to the duration, otherwise it will be + * reduced to the remaining instance lifetime. + */ + if (adv->timeout == 0 || adv->duration <= adv->remaining_time) + timeout = adv->duration; + else + timeout = adv->remaining_time; + + /* The remaining time is being reduced unless the instance is being + * advertised without time limit. + */ + if (adv->timeout) + adv->remaining_time = adv->remaining_time - timeout; + + /* Only use work for scheduling instances with legacy advertising */ + if (!ext_adv_capable(hdev)) { + hdev->adv_instance_timeout = timeout; + queue_delayed_work(hdev->req_workqueue, + &hdev->adv_instance_expire, + msecs_to_jiffies(timeout * 1000)); + } + + /* If we're just re-scheduling the same instance again then do not + * execute any HCI commands. This happens when a single instance is + * being advertised. + */ + if (!force && hdev->cur_adv_instance == instance && + hci_dev_test_flag(hdev, HCI_LE_ADV)) + return 0; + + hdev->cur_adv_instance = instance; + + return hci_start_adv_sync(hdev, instance); +} + +static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk) +{ + int err; + + if (!ext_adv_capable(hdev)) + return 0; + + /* Disable instance 0x00 to disable all instances */ + err = hci_disable_ext_adv_instance_sync(hdev, 0x00); + if (err) + return err; + + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS, + 0, NULL, 0, HCI_CMD_TIMEOUT, sk); +} + +static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) +{ + struct adv_info *adv, *n; + + if (ext_adv_capable(hdev)) + /* Remove all existing sets */ + return hci_clear_adv_sets_sync(hdev, sk); + + /* This is safe as long as there is no command send while the lock is + * held. + */ + hci_dev_lock(hdev); + + /* Cleanup non-ext instances */ + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + u8 instance = adv->instance; + int err; + + if (!(force || adv->timeout)) + continue; + + err = hci_remove_adv_instance(hdev, instance); + if (!err) + mgmt_advertising_removed(sk, hdev, instance); + } + + hci_dev_unlock(hdev); + + return 0; +} + +static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance, + struct sock *sk) +{ + int err; + + /* If we use extended advertising, instance has to be removed first. */ + if (ext_adv_capable(hdev)) + return hci_remove_ext_adv_instance_sync(hdev, instance, sk); + + /* This is safe as long as there is no command send while the lock is + * held. 
+ */ + hci_dev_lock(hdev); + + err = hci_remove_adv_instance(hdev, instance); + if (!err) + mgmt_advertising_removed(sk, hdev, instance); + + hci_dev_unlock(hdev); + + return err; +} + +/* For a single instance: + * - force == true: The instance will be removed even when its remaining + * lifetime is not zero. + * - force == false: the instance will be deactivated but kept stored unless + * the remaining lifetime is zero. + * + * For instance == 0x00: + * - force == true: All instances will be removed regardless of their timeout + * setting. + * - force == false: Only instances that have a timeout will be removed. + */ +int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk, + u8 instance, bool force) +{ + struct adv_info *next = NULL; + int err; + + /* Cancel any timeout concerning the removed instance(s). */ + if (!instance || hdev->cur_adv_instance == instance) + cancel_adv_timeout(hdev); + + /* Get the next instance to advertise BEFORE we remove + * the current one. This can be the same instance again + * if there is only one instance. + */ + if (hdev->cur_adv_instance == instance) + next = hci_get_next_instance(hdev, instance); + + if (!instance) { + err = hci_clear_adv_sync(hdev, sk, force); + if (err) + return err; + } else { + struct adv_info *adv = hci_find_adv_instance(hdev, instance); + + if (force || (adv && adv->timeout && !adv->remaining_time)) { + /* Don't advertise a removed instance. */ + if (next && next->instance == instance) + next = NULL; + + err = hci_remove_adv_sync(hdev, instance, sk); + if (err) + return err; + } + } + + if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) + return 0; + + if (next && !ext_adv_capable(hdev)) + hci_schedule_adv_instance_sync(hdev, next->instance, false); + + return 0; +} + +int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_read_rssi cp; + + cp.handle = handle; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK, + sizeof(*cp), cp, HCI_CMD_TIMEOUT); +} + +int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type) +{ + struct hci_cp_read_tx_power cp; + + cp.handle = handle; + cp.type = type; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_disable_advertising_sync(struct hci_dev *hdev) +{ + u8 enable = 0x00; + + /* If controller is not advertising we are done. 
*/ + if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) + return 0; + + if (ext_adv_capable(hdev)) + return hci_disable_ext_adv_instance_sync(hdev, 0x00); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, + sizeof(enable), &enable, HCI_CMD_TIMEOUT); +} + +static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val, + u8 filter_dup) +{ + struct hci_cp_le_set_ext_scan_enable cp; + + memset(&cp, 0, sizeof(cp)); + cp.enable = val; + cp.filter_dup = filter_dup; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, + u8 filter_dup) +{ + struct hci_cp_le_set_scan_enable cp; + + if (use_ext_scan(hdev)) + return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup); + + memset(&cp, 0, sizeof(cp)); + cp.enable = val; + cp.filter_dup = filter_dup; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val) +{ + if (!use_ll_privacy(hdev)) + return 0; + + /* If controller is not/already resolving we are done. */ + if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, + sizeof(val), &val, HCI_CMD_TIMEOUT); +} + +static int hci_scan_disable_sync(struct hci_dev *hdev) +{ + int err; + + /* If controller is not scanning we are done. */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) + return 0; + + if (hdev->scanning_paused) { + bt_dev_dbg(hdev, "Scanning is paused for suspend"); + return 0; + } + + err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00); + if (err) { + bt_dev_err(hdev, "Unable to disable scanning: %d", err); + return err; + } + + return err; +} + +static bool scan_use_rpa(struct hci_dev *hdev) +{ + return hci_dev_test_flag(hdev, HCI_PRIVACY); +} + +static void hci_start_interleave_scan(struct hci_dev *hdev) +{ + hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; + queue_delayed_work(hdev->req_workqueue, + &hdev->interleave_scan, 0); +} + +static bool is_interleave_scanning(struct hci_dev *hdev) +{ + return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; +} + +static void cancel_interleave_scan(struct hci_dev *hdev) +{ + bt_dev_dbg(hdev, "cancelling interleave scan"); + + cancel_delayed_work_sync(&hdev->interleave_scan); + + hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; +} + +/* Return true if interleave_scan wasn't started until exiting this function, + * otherwise, return false + */ +static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev) +{ + /* Do interleaved scan only if all of the following are true: + * - There is at least one ADV monitor + * - At least one pending LE connection or one device to be scanned for + * - Monitor offloading is not supported + * If so, we should alternate between allowlist scan and one without + * any filters to save power. 
+ */
+ bool use_interleaving = hci_is_adv_monitoring(hdev) &&
+ !(list_empty(&hdev->pend_le_conns) &&
+ list_empty(&hdev->pend_le_reports)) &&
+ hci_get_adv_monitor_offload_ext(hdev) ==
+ HCI_ADV_MONITOR_EXT_NONE;
+ bool is_interleaving = is_interleave_scanning(hdev);
+
+ if (use_interleaving && !is_interleaving) {
+ hci_start_interleave_scan(hdev);
+ bt_dev_dbg(hdev, "starting interleave scan");
+ return true;
+ }
+
+ if (!use_interleaving && is_interleaving)
+ cancel_interleave_scan(hdev);
+
+ return false;
+}
+
+/* Removes connection from the resolving list if needed. */
+static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+ struct hci_cp_le_del_from_resolv_list cp;
+ struct bdaddr_list_with_irk *entry;
+
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ /* Check if the IRK has been programmed */
+ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
+ bdaddr_type);
+ if (!entry)
+ return 0;
+
+ cp.bdaddr_type = bdaddr_type;
+ bacpy(&cp.bdaddr, bdaddr);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+ struct hci_cp_le_del_from_accept_list cp;
+ int err;
+
+ /* Check if device is on accept list before removing it */
+ if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
+ return 0;
+
+ cp.bdaddr_type = bdaddr_type;
+ bacpy(&cp.bdaddr, bdaddr);
+
+ /* Ignore errors when removing from the resolving list, as it is likely
+ * that the device was never added.
+ */
+ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
+ return err;
+ }
+
+ bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
+ cp.bdaddr_type);
+
+ return 0;
+}
+
+/* Adds connection to the resolving list if needed.
+ * Setting params to NULL programs the local hdev->irk.
+ */
+static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
+ struct hci_conn_params *params)
+{
+ struct hci_cp_le_add_to_resolv_list cp;
+ struct smp_irk *irk;
+ struct bdaddr_list_with_irk *entry;
+
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ /* Attempt to program local identity address, type and irk if params is
+ * NULL.
+ */
+ if (!params) {
+ if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
+ return 0;
+
+ hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
+ memcpy(cp.peer_irk, hdev->irk, 16);
+ goto done;
+ }
+
+ irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
+ if (!irk)
+ return 0;
+
+ /* Check if the IRK has _not_ been programmed yet. */
+ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
+ &params->addr,
+ params->addr_type);
+ if (entry)
+ return 0;
+
+ cp.bdaddr_type = params->addr_type;
+ bacpy(&cp.bdaddr, &params->addr);
+ memcpy(cp.peer_irk, irk->val, 16);
+
+done:
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY))
+ memcpy(cp.local_irk, hdev->irk, 16);
+ else
+ memset(cp.local_irk, 0, 16);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* Adds connection to the allow list if needed. If the device uses an RPA
+ * (has an IRK), this attempts to program the device in the resolving list as
+ * well.
+ */
+static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
+ struct hci_conn_params *params,
+ u8 *num_entries)
+{
+ struct hci_cp_le_add_to_accept_list cp;
+ int err;
+
+ /* Already in accept list */
+ if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
+ params->addr_type))
+ return 0;
+
+ /* Select filter policy to accept all advertising */
+ if (*num_entries >= hdev->le_accept_list_size)
+ return -ENOSPC;
+
+ /* Accept list cannot be used with RPAs */
+ if (!use_ll_privacy(hdev) &&
+ hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
+ return -EINVAL;
+ }
+
+ /* During suspend, only wakeable devices can be in acceptlist */
+ if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
+ params->current_flags))
+ return 0;
+
+ /* Attempt to program the device in the resolving list first to avoid
+ * having to roll back in case it fails. Since the resolving list is
+ * dynamic, it can probably be smaller than the accept list.
+ */
+ err = hci_le_add_resolve_list_sync(hdev, params);
+ if (err) {
+ bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
+ return err;
+ }
+
+ *num_entries += 1;
+ cp.bdaddr_type = params->addr_type;
+ bacpy(&cp.bdaddr, &params->addr);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Unable to add to allow list: %d", err);
+ /* Rollback the device from the resolving list */
+ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
+ return err;
+ }
+
+ bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
+ cp.bdaddr_type);
+
+ return 0;
+}
+
+/* This function disables/pauses all advertising instances */
+static int hci_pause_advertising_sync(struct hci_dev *hdev)
+{
+ int err;
+ int old_state;
+
+ /* If there are no instances or advertising has already been paused
+ * there is nothing to do.
+ */
+ if (!hdev->adv_instance_cnt || hdev->advertising_paused)
+ return 0;
+
+ bt_dev_dbg(hdev, "Pausing directed advertising");
+
+ /* Stop directed advertising */
+ old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
+ if (old_state) {
+ /* When discoverable timeout triggers, then just make sure
+ * the limited discoverable flag is cleared. Even in the case
+ * of a timeout triggered from general discoverable, it is
+ * safe to unconditionally clear the flag.
+ */
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+ hdev->discov_timeout = 0;
+ }
+
+ bt_dev_dbg(hdev, "Pausing advertising instances");
+
+ /* Call to disable any advertisements active on the controller.
+ * This will succeed even if no advertisements are configured.
+ */
+ err = hci_disable_advertising_sync(hdev);
+ if (err)
+ return err;
+
+ /* If we are using software rotation, pause the loop */
+ if (!ext_adv_capable(hdev))
+ cancel_adv_timeout(hdev);
+
+ hdev->advertising_paused = true;
+ hdev->advertising_old_state = old_state;
+
+ return 0;
+}
+
+/* This function enables all user advertising instances */
+static int hci_resume_advertising_sync(struct hci_dev *hdev)
+{
+ struct adv_info *adv, *tmp;
+ int err;
+
+ /* If advertising has not been paused there is nothing to do.
*/ + if (!hdev->advertising_paused) + return 0; + + /* Resume directed advertising */ + hdev->advertising_paused = false; + if (hdev->advertising_old_state) { + hci_dev_set_flag(hdev, HCI_ADVERTISING); + hdev->advertising_old_state = 0; + } + + bt_dev_dbg(hdev, "Resuming advertising instances"); + + if (ext_adv_capable(hdev)) { + /* Call for each tracked instance to be re-enabled */ + list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) { + err = hci_enable_ext_advertising_sync(hdev, + adv->instance); + if (!err) + continue; + + /* If the instance cannot be resumed remove it */ + hci_remove_ext_adv_instance_sync(hdev, adv->instance, + NULL); + } + } else { + /* Schedule for most recent instance to be restarted and begin + * the software rotation loop + */ + err = hci_schedule_adv_instance_sync(hdev, + hdev->cur_adv_instance, + true); + } + + hdev->advertising_paused = false; + + return err; +} + +struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, + bool extended, struct sock *sk) +{ + u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA : + HCI_OP_READ_LOCAL_OOB_DATA; + + return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk); +} + +/* Device must not be scanning when updating the accept list. + * + * Update is done using the following sequence: + * + * use_ll_privacy((Disable Advertising) -> Disable Resolving List) -> + * Remove Devices From Accept List -> + * (has IRK && use_ll_privacy(Remove Devices From Resolving List))-> + * Add Devices to Accept List -> + * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) -> + * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) -> + * Enable Scanning + * + * In case of failure advertising shall be restored to its original state and + * return would disable accept list since either accept or resolving list could + * not be programmed. + * + */ +static u8 hci_update_accept_list_sync(struct hci_dev *hdev) +{ + struct hci_conn_params *params; + struct bdaddr_list *b, *t; + u8 num_entries = 0; + bool pend_conn, pend_report; + int err; + + /* Pause advertising if resolving list can be used as controllers are + * cannot accept resolving list modifications while advertising. + */ + if (use_ll_privacy(hdev)) { + err = hci_pause_advertising_sync(hdev); + if (err) { + bt_dev_err(hdev, "pause advertising failed: %d", err); + return 0x00; + } + } + + /* Disable address resolution while reprogramming accept list since + * devices that do have an IRK will be programmed in the resolving list + * when LL Privacy is enabled. + */ + err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); + if (err) { + bt_dev_err(hdev, "Unable to disable LL privacy: %d", err); + goto done; + } + + /* Go through the current accept list programmed into the + * controller one by one and check if that address is still + * in the list of pending connections or list of devices to + * report. If not present in either list, then remove it from + * the controller. + */ + list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) { + pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, + &b->bdaddr, + b->bdaddr_type); + pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, + &b->bdaddr, + b->bdaddr_type); + + /* If the device is not likely to connect or report, + * remove it from the acceptlist. 
+ */ + if (!pend_conn && !pend_report) { + hci_le_del_accept_list_sync(hdev, &b->bdaddr, + b->bdaddr_type); + continue; + } + + num_entries++; + } + + /* Since all no longer valid accept list entries have been + * removed, walk through the list of pending connections + * and ensure that any new device gets programmed into + * the controller. + * + * If the list of the devices is larger than the list of + * available accept list entries in the controller, then + * just abort and return filer policy value to not use the + * accept list. + */ + list_for_each_entry(params, &hdev->pend_le_conns, action) { + err = hci_le_add_accept_list_sync(hdev, params, &num_entries); + if (err) + goto done; + } + + /* After adding all new pending connections, walk through + * the list of pending reports and also add these to the + * accept list if there is still space. Abort if space runs out. + */ + list_for_each_entry(params, &hdev->pend_le_reports, action) { + err = hci_le_add_accept_list_sync(hdev, params, &num_entries); + if (err) + goto done; + } + + /* Use the allowlist unless the following conditions are all true: + * - We are not currently suspending + * - There are 1 or more ADV monitors registered and it's not offloaded + * - Interleaved scanning is not currently using the allowlist + */ + if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && + hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && + hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) + err = -EINVAL; + +done: + /* Enable address resolution when LL Privacy is enabled. */ + err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01); + if (err) + bt_dev_err(hdev, "Unable to enable LL privacy: %d", err); + + /* Resume advertising if it was paused */ + if (use_ll_privacy(hdev)) + hci_resume_advertising_sync(hdev); + + /* Select filter policy to use accept list */ + return err ? 
0x00 : 0x01; +} + +/* Returns true if an le connection is in the scanning state */ +static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == LE_LINK && c->state == BT_CONNECT && + test_bit(HCI_CONN_SCANNING, &c->flags)) { + rcu_read_unlock(); + return true; + } + } + + rcu_read_unlock(); + + return false; +} + +static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, + u16 interval, u16 window, + u8 own_addr_type, u8 filter_policy) +{ + struct hci_cp_le_set_ext_scan_params *cp; + struct hci_cp_le_scan_phy_params *phy; + u8 data[sizeof(*cp) + sizeof(*phy) * 2]; + u8 num_phy = 0; + + cp = (void *)data; + phy = (void *)cp->data; + + memset(data, 0, sizeof(data)); + + cp->own_addr_type = own_addr_type; + cp->filter_policy = filter_policy; + + if (scan_1m(hdev) || scan_2m(hdev)) { + cp->scanning_phys |= LE_SCAN_PHY_1M; + + phy->type = type; + phy->interval = cpu_to_le16(interval); + phy->window = cpu_to_le16(window); + + num_phy++; + phy++; + } + + if (scan_coded(hdev)) { + cp->scanning_phys |= LE_SCAN_PHY_CODED; + + phy->type = type; + phy->interval = cpu_to_le16(interval); + phy->window = cpu_to_le16(window); + + num_phy++; + phy++; + } + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS, + sizeof(*cp) + sizeof(*phy) * num_phy, + data, HCI_CMD_TIMEOUT); +} + +static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type, + u16 interval, u16 window, + u8 own_addr_type, u8 filter_policy) +{ + struct hci_cp_le_set_scan_param cp; + + if (use_ext_scan(hdev)) + return hci_le_set_ext_scan_param_sync(hdev, type, interval, + window, own_addr_type, + filter_policy); + + memset(&cp, 0, sizeof(cp)); + cp.type = type; + cp.interval = cpu_to_le16(interval); + cp.window = cpu_to_le16(window); + cp.own_address_type = own_addr_type; + cp.filter_policy = filter_policy; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval, + u16 window, u8 own_addr_type, u8 filter_policy, + u8 filter_dup) +{ + int err; + + if (hdev->scanning_paused) { + bt_dev_dbg(hdev, "Scanning is paused for suspend"); + return 0; + } + + err = hci_le_set_scan_param_sync(hdev, type, interval, window, + own_addr_type, filter_policy); + if (err) + return err; + + return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup); +} + +static int hci_passive_scan_sync(struct hci_dev *hdev) +{ + u8 own_addr_type; + u8 filter_policy; + u16 window, interval; + int err; + + if (hdev->scanning_paused) { + bt_dev_dbg(hdev, "Scanning is paused for suspend"); + return 0; + } + + err = hci_scan_disable_sync(hdev); + if (err) { + bt_dev_err(hdev, "disable scanning failed: %d", err); + return err; + } + + /* Set require_privacy to false since no SCAN_REQ are send + * during passive scanning. Not using an non-resolvable address + * here is important so that peer devices using direct + * advertising with our address will be correctly reported + * by the controller. 
+ */ + if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev), + &own_addr_type)) + return 0; + + if (hdev->enable_advmon_interleave_scan && + hci_update_interleaved_scan_sync(hdev)) + return 0; + + bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); + + /* Adding or removing entries from the accept list must + * happen before enabling scanning. The controller does + * not allow accept list modification while scanning. + */ + filter_policy = hci_update_accept_list_sync(hdev); + + /* When the controller is using random resolvable addresses and + * with that having LE privacy enabled, then controllers with + * Extended Scanner Filter Policies support can now enable support + * for handling directed advertising. + * + * So instead of using filter polices 0x00 (no acceptlist) + * and 0x01 (acceptlist enabled) use the new filter policies + * 0x02 (no acceptlist) and 0x03 (acceptlist enabled). + */ + if (hci_dev_test_flag(hdev, HCI_PRIVACY) && + (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) + filter_policy |= 0x02; + + if (hdev->suspended) { + window = hdev->le_scan_window_suspend; + interval = hdev->le_scan_int_suspend; + } else if (hci_is_le_conn_scanning(hdev)) { + window = hdev->le_scan_window_connect; + interval = hdev->le_scan_int_connect; + } else if (hci_is_adv_monitoring(hdev)) { + window = hdev->le_scan_window_adv_monitor; + interval = hdev->le_scan_int_adv_monitor; + } else { + window = hdev->le_scan_window; + interval = hdev->le_scan_interval; + } + + bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy); + + return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window, + own_addr_type, filter_policy, + LE_SCAN_FILTER_DUP_ENABLE); +} + +/* This function controls the passive scanning based on hdev->pend_le_conns + * list. If there are pending LE connection we start the background scanning, + * otherwise we stop it in the following sequence: + * + * If there are devices to scan: + * + * Disable Scanning -> Update Accept List -> + * use_ll_privacy((Disable Advertising) -> Disable Resolving List -> + * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) -> + * Enable Scanning + * + * Otherwise: + * + * Disable Scanning + */ +int hci_update_passive_scan_sync(struct hci_dev *hdev) +{ + int err; + + if (!test_bit(HCI_UP, &hdev->flags) || + test_bit(HCI_INIT, &hdev->flags) || + hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG) || + hci_dev_test_flag(hdev, HCI_AUTO_OFF) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + + /* No point in doing scanning if LE support hasn't been enabled */ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + /* If discovery is active don't interfere with it */ + if (hdev->discovery.state != DISCOVERY_STOPPED) + return 0; + + /* Reset RSSI and UUID filters when starting background scanning + * since these filters are meant for service discovery only. + * + * The Start Discovery and Start Service Discovery operations + * ensure to set proper values for RSSI threshold and UUID + * filter list. So it is safe to just reset them here. + */ + hci_discovery_filter_clear(hdev); + + bt_dev_dbg(hdev, "ADV monitoring is %s", + hci_is_adv_monitoring(hdev) ? "on" : "off"); + + if (list_empty(&hdev->pend_le_conns) && + list_empty(&hdev->pend_le_reports) && + !hci_is_adv_monitoring(hdev)) { + /* If there is no pending LE connections or devices + * to be scanned for or no ADV monitors, we should stop the + * background scanning. 
+ */ + + bt_dev_dbg(hdev, "stopping background scanning"); + + err = hci_scan_disable_sync(hdev); + if (err) + bt_dev_err(hdev, "stop background scanning failed: %d", + err); + } else { + /* If there is at least one pending LE connection, we should + * keep the background scan running. + */ + + /* If controller is connecting, we should not start scanning + * since some controllers are not able to scan and connect at + * the same time. + */ + if (hci_lookup_le_connect(hdev)) + return 0; + + bt_dev_dbg(hdev, "start background scanning"); + + err = hci_passive_scan_sync(hdev); + if (err) + bt_dev_err(hdev, "start background scanning failed: %d", + err); + } + + return err; +} + +static int update_passive_scan_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_passive_scan_sync(hdev); +} + +int hci_update_passive_scan(struct hci_dev *hdev) +{ + /* Only queue if it would have any effect */ + if (!test_bit(HCI_UP, &hdev->flags) || + test_bit(HCI_INIT, &hdev->flags) || + hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG) || + hci_dev_test_flag(hdev, HCI_AUTO_OFF) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + + return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL); +} + +int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) +{ + int err; + + if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev)) + return 0; + + err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, + sizeof(val), &val, HCI_CMD_TIMEOUT); + + if (!err) { + if (val) { + hdev->features[1][0] |= LMP_HOST_SC; + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + } else { + hdev->features[1][0] &= ~LMP_HOST_SC; + hci_dev_clear_flag(hdev, HCI_SC_ENABLED); + } + } + + return err; +} + +int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode) +{ + int err; + + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || + lmp_host_ssp_capable(hdev)) + return 0; + + if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { + __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); + } + + err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); + if (err) + return err; + + return hci_write_sc_support_sync(hdev, 0x01); +} + +int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul) +{ + struct hci_cp_write_le_host_supported cp; + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || + !lmp_bredr_capable(hdev)) + return 0; + + /* Check first if we already have the right host state + * (host features set) + */ + if (le == lmp_host_le_capable(hdev) && + simul == lmp_host_le_br_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + cp.le = le; + cp.simul = simul; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_powered_update_adv_sync(struct hci_dev *hdev) +{ + struct adv_info *adv, *tmp; + int err; + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + /* If RPA Resolution has not been enable yet it means the + * resolving list is empty and we should attempt to program the + * local IRK in order to support using own_addr_type + * ADDR_LE_DEV_RANDOM_RESOLVED (0x03). + */ + if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { + hci_le_add_resolve_list_sync(hdev, NULL); + hci_le_set_addr_resolution_enable_sync(hdev, 0x01); + } + + /* Make sure the controller has a good default for + * advertising data. This also applies to the case + * where BR/EDR was toggled during the AUTO_OFF phase. 
+ */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || + list_empty(&hdev->adv_instances)) { + if (ext_adv_capable(hdev)) { + err = hci_setup_ext_adv_instance_sync(hdev, 0x00); + if (!err) + hci_update_scan_rsp_data_sync(hdev, 0x00); + } else { + err = hci_update_adv_data_sync(hdev, 0x00); + if (!err) + hci_update_scan_rsp_data_sync(hdev, 0x00); + } + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) + hci_enable_advertising_sync(hdev); + } + + /* Call for each tracked instance to be scheduled */ + list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) + hci_schedule_adv_instance_sync(hdev, adv->instance, true); + + return 0; +} + +static int hci_write_auth_enable_sync(struct hci_dev *hdev) +{ + u8 link_sec; + + link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); + if (link_sec == test_bit(HCI_AUTH, &hdev->flags)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, + sizeof(link_sec), &link_sec, + HCI_CMD_TIMEOUT); +} + +int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable) +{ + struct hci_cp_write_page_scan_activity cp; + u8 type; + int err = 0; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + if (hdev->hci_ver < BLUETOOTH_VER_1_2) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (enable) { + type = PAGE_SCAN_TYPE_INTERLACED; + + /* 160 msec page scan interval */ + cp.interval = cpu_to_le16(0x0100); + } else { + type = hdev->def_page_scan_type; + cp.interval = cpu_to_le16(hdev->def_page_scan_int); + } + + cp.window = cpu_to_le16(hdev->def_page_scan_window); + + if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval || + __cpu_to_le16(hdev->page_scan_window) != cp.window) { + err = __hci_cmd_sync_status(hdev, + HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (err) + return err; + } + + if (hdev->page_scan_type != type) + err = __hci_cmd_sync_status(hdev, + HCI_OP_WRITE_PAGE_SCAN_TYPE, + sizeof(type), &type, + HCI_CMD_TIMEOUT); + + return err; +} + +static bool disconnected_accept_list_entries(struct hci_dev *hdev) +{ + struct bdaddr_list *b; + + list_for_each_entry(b, &hdev->accept_list, list) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); + if (!conn) + return true; + + if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) + return true; + } + + return false; +} + +static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, + sizeof(val), &val, + HCI_CMD_TIMEOUT); +} + +int hci_update_scan_sync(struct hci_dev *hdev) +{ + u8 scan; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + if (!hdev_is_powered(hdev)) + return 0; + + if (mgmt_powering_down(hdev)) + return 0; + + if (hdev->scanning_paused) + return 0; + + if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || + disconnected_accept_list_entries(hdev)) + scan = SCAN_PAGE; + else + scan = SCAN_DISABLED; + + if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + scan |= SCAN_INQUIRY; + + if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && + test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) + return 0; + + return hci_write_scan_enable_sync(hdev, scan); +} + +int hci_update_name_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_local_name cp; + + memset(&cp, 0, sizeof(cp)); + + memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, + sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +/* This function perform powered update HCI 
command sequence after the HCI init + * sequence which end up resetting all states, the sequence is as follows: + * + * HCI_SSP_ENABLED(Enable SSP) + * HCI_LE_ENABLED(Enable LE) + * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) -> + * Update adv data) + * Enable Authentication + * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class -> + * Set Name -> Set EIR) + */ +int hci_powered_update_sync(struct hci_dev *hdev) +{ + int err; + + /* Register the available SMP channels (BR/EDR and LE) only when + * successfully powering on the controller. This late + * registration is required so that LE SMP can clearly decide if + * the public address or static address is used. + */ + smp_register(hdev); + + err = hci_write_ssp_mode_sync(hdev, 0x01); + if (err) + return err; + + err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00); + if (err) + return err; + + err = hci_powered_update_adv_sync(hdev); + if (err) + return err; + + err = hci_write_auth_enable_sync(hdev); + if (err) + return err; + + if (lmp_bredr_capable(hdev)) { + if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) + hci_write_fast_connectable_sync(hdev, true); + else + hci_write_fast_connectable_sync(hdev, false); + hci_update_scan_sync(hdev); + hci_update_class_sync(hdev); + hci_update_name_sync(hdev); + hci_update_eir_sync(hdev); + } + + return 0; +} + +/** + * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address + * (BD_ADDR) for a HCI device from + * a firmware node property. + * @hdev: The HCI device + * + * Search the firmware node for 'local-bd-address'. + * + * All-zero BD addresses are rejected, because those could be properties + * that exist in the firmware tables, but were not updated by the firmware. For + * example, the DTS could define 'local-bd-address', with zero BD addresses. 
+ */ +static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) +{ + struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); + bdaddr_t ba; + int ret; + + ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", + (u8 *)&ba, sizeof(ba)); + if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) + return; + + bacpy(&hdev->public_addr, &ba); +} + +struct hci_init_stage { + int (*func)(struct hci_dev *hdev); +}; + +/* Run init stage NULL terminated function table */ +static int hci_init_stage_sync(struct hci_dev *hdev, + const struct hci_init_stage *stage) +{ + size_t i; + + for (i = 0; stage[i].func; i++) { + int err; + + err = stage[i].func(hdev); + if (err) + return err; + } + + return 0; +} + +/* Read Local Version */ +static int hci_read_local_version_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read BD Address */ +static int hci_read_bd_addr_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR, + 0, NULL, HCI_CMD_TIMEOUT); +} + +#define HCI_INIT(_func) \ +{ \ + .func = _func, \ +} + +static const struct hci_init_stage hci_init0[] = { + /* HCI_OP_READ_LOCAL_VERSION */ + HCI_INIT(hci_read_local_version_sync), + /* HCI_OP_READ_BD_ADDR */ + HCI_INIT(hci_read_bd_addr_sync), + {} +}; + +int hci_reset_sync(struct hci_dev *hdev) +{ + int err; + + set_bit(HCI_RESET, &hdev->flags); + + err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, + HCI_CMD_TIMEOUT); + if (err) + return err; + + return 0; +} + +static int hci_init0_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + /* Reset */ + if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + err = hci_reset_sync(hdev); + if (err) + return err; + } + + return hci_init_stage_sync(hdev, hci_init0); +} + +static int hci_unconf_init_sync(struct hci_dev *hdev) +{ + int err; + + if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + return 0; + + err = hci_init0_sync(hdev); + if (err < 0) + return err; + + if (hci_dev_test_flag(hdev, HCI_SETUP)) + hci_debugfs_create_basic(hdev); + + return 0; +} + +/* Read Local Supported Features. */ +static int hci_read_local_features_sync(struct hci_dev *hdev) +{ + /* Not all AMP controllers support this command */ + if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* BR Controller init stage 1 command sequence */ +static const struct hci_init_stage br_init1[] = { + /* HCI_OP_READ_LOCAL_FEATURES */ + HCI_INIT(hci_read_local_features_sync), + /* HCI_OP_READ_LOCAL_VERSION */ + HCI_INIT(hci_read_local_version_sync), + /* HCI_OP_READ_BD_ADDR */ + HCI_INIT(hci_read_bd_addr_sync), + {} +}; + +/* Read Local Commands */ +static int hci_read_local_cmds_sync(struct hci_dev *hdev) +{ + /* All Bluetooth 1.2 and later controllers should support the + * HCI command for reading the local supported commands. + * + * Unfortunately some controllers indicate Bluetooth 1.2 support, + * but do not have support for this command. If that is the case, + * the driver can quirk the behavior and skip reading the local + * supported commands. 
+ */ + if (hdev->hci_ver > BLUETOOTH_VER_1_1 && + !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, + 0, NULL, HCI_CMD_TIMEOUT); + + return 0; +} + +/* Read Local AMP Info */ +static int hci_read_local_amp_info_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Data Blk size */ +static int hci_read_data_block_size_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Flow Control Mode */ +static int hci_read_flow_control_mode_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Location Data */ +static int hci_read_location_data_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* AMP Controller init stage 1 command sequence */ +static const struct hci_init_stage amp_init1[] = { + /* HCI_OP_READ_LOCAL_VERSION */ + HCI_INIT(hci_read_local_version_sync), + /* HCI_OP_READ_LOCAL_COMMANDS */ + HCI_INIT(hci_read_local_cmds_sync), + /* HCI_OP_READ_LOCAL_AMP_INFO */ + HCI_INIT(hci_read_local_amp_info_sync), + /* HCI_OP_READ_DATA_BLOCK_SIZE */ + HCI_INIT(hci_read_data_block_size_sync), + /* HCI_OP_READ_FLOW_CONTROL_MODE */ + HCI_INIT(hci_read_flow_control_mode_sync), + /* HCI_OP_READ_LOCATION_DATA */ + HCI_INIT(hci_read_location_data_sync), +}; + +static int hci_init1_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + /* Reset */ + if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + err = hci_reset_sync(hdev); + if (err) + return err; + } + + switch (hdev->dev_type) { + case HCI_PRIMARY: + hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; + return hci_init_stage_sync(hdev, br_init1); + case HCI_AMP: + hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; + return hci_init_stage_sync(hdev, amp_init1); + default: + bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type); + break; + } + + return 0; +} + +/* AMP Controller init stage 2 command sequence */ +static const struct hci_init_stage amp_init2[] = { + /* HCI_OP_READ_LOCAL_FEATURES */ + HCI_INIT(hci_read_local_features_sync), +}; + +/* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/
+static int hci_read_buffer_size_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Class of Device */
+static int hci_read_dev_class_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Local Name */
+static int hci_read_local_name_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Voice Setting */
+static int hci_read_voice_setting_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Number of Supported IAC */
+static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Current IAC LAP */
+static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
+ u8 cond_type, bdaddr_t *bdaddr,
+ u8 auto_accept)
+{
+ struct hci_cp_set_event_filter cp;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.flt_type = flt_type;
+
+ if (flt_type != HCI_FLT_CLEAR_ALL) {
+ cp.cond_type = cond_type;
+ bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
+ cp.addr_conn_flt.auto_accept = auto_accept;
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
+ flt_type == HCI_FLT_CLEAR_ALL ?
+ sizeof(cp.flt_type) : sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int hci_clear_event_filter_sync(struct hci_dev *hdev)
+{
+ if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
+ return 0;
+
+ return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
+ BDADDR_ANY, 0x00);
+}
+
+/* Connection accept timeout ~20 secs */
+static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
+{
+ __le16 param = cpu_to_le16(0x7d00);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
+ sizeof(param), &param, HCI_CMD_TIMEOUT);
+}
+
+/* BR Controller init stage 2 command sequence */
+static const struct hci_init_stage br_init2[] = {
+ /* HCI_OP_READ_BUFFER_SIZE */
+ HCI_INIT(hci_read_buffer_size_sync),
+ /* HCI_OP_READ_CLASS_OF_DEV */
+ HCI_INIT(hci_read_dev_class_sync),
+ /* HCI_OP_READ_LOCAL_NAME */
+ HCI_INIT(hci_read_local_name_sync),
+ /* HCI_OP_READ_VOICE_SETTING */
+ HCI_INIT(hci_read_voice_setting_sync),
+ /* HCI_OP_READ_NUM_SUPPORTED_IAC */
+ HCI_INIT(hci_read_num_supported_iac_sync),
+ /* HCI_OP_READ_CURRENT_IAC_LAP */
+ HCI_INIT(hci_read_current_iac_lap_sync),
+ /* HCI_OP_SET_EVENT_FLT */
+ HCI_INIT(hci_clear_event_filter_sync),
+ /* HCI_OP_WRITE_CA_TIMEOUT */
+ HCI_INIT(hci_write_ca_timeout_sync),
+ {}
+};
+
+static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
+{
+ u8 mode = 0x01;
+
+ if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ return 0;
+
+ /* When SSP is available, then the host features page
+ * should also be available as well. However some
+ * controllers list the max_page as 0 as long as SSP
+ * has not been enabled. To achieve proper debugging
+ * output, force the minimum max_page to 1 at least.
+ */ + hdev->max_page = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); +} + +static int hci_write_eir_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_eir cp; + + if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return 0; + + memset(hdev->eir, 0, sizeof(hdev->eir)); + memset(&cp, 0, sizeof(cp)); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +static int hci_write_inquiry_mode_sync(struct hci_dev *hdev) +{ + u8 mode; + + if (!lmp_inq_rssi_capable(hdev) && + !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) + return 0; + + /* If Extended Inquiry Result events are supported, then + * they are clearly preferred over Inquiry Result with RSSI + * events. + */ + mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); +} + +static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev) +{ + if (!lmp_inq_tx_pwr_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page) +{ + struct hci_cp_read_local_ext_features cp; + + if (!lmp_ext_feat_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.page = page; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev) +{ + return hci_read_local_ext_features_sync(hdev, 0x01); +} + +/* HCI Controller init stage 2 command sequence */ +static const struct hci_init_stage hci_init2[] = { + /* HCI_OP_READ_LOCAL_COMMANDS */ + HCI_INIT(hci_read_local_cmds_sync), + /* HCI_OP_WRITE_SSP_MODE */ + HCI_INIT(hci_write_ssp_mode_1_sync), + /* HCI_OP_WRITE_EIR */ + HCI_INIT(hci_write_eir_sync), + /* HCI_OP_WRITE_INQUIRY_MODE */ + HCI_INIT(hci_write_inquiry_mode_sync), + /* HCI_OP_READ_INQ_RSP_TX_POWER */ + HCI_INIT(hci_read_inq_rsp_tx_power_sync), + /* HCI_OP_READ_LOCAL_EXT_FEATURES */ + HCI_INIT(hci_read_local_ext_features_1_sync), + /* HCI_OP_WRITE_AUTH_ENABLE */ + HCI_INIT(hci_write_auth_enable_sync), + {} +}; + +/* Read LE Buffer Size */ +static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read LE Local Supported Features */ +static int hci_le_read_local_features_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read LE Supported States */ +static int hci_le_read_supported_states_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* LE Controller init stage 2 command sequence */ +static const struct hci_init_stage le_init2[] = { + /* HCI_OP_LE_READ_BUFFER_SIZE */ + HCI_INIT(hci_le_read_buffer_size_sync), + /* HCI_OP_LE_READ_LOCAL_FEATURES */ + HCI_INIT(hci_le_read_local_features_sync), + /* HCI_OP_LE_READ_SUPPORTED_STATES */ + HCI_INIT(hci_le_read_supported_states_sync), + {} +}; + +static int hci_init2_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + if (hdev->dev_type == HCI_AMP) + return hci_init_stage_sync(hdev, amp_init2); + + if (lmp_bredr_capable(hdev)) { + err = hci_init_stage_sync(hdev, br_init2); + if (err) + return err; + } else { + 
hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); + } + + if (lmp_le_capable(hdev)) { + err = hci_init_stage_sync(hdev, le_init2); + if (err) + return err; + /* LE-only controllers have LE implicitly enabled */ + if (!lmp_bredr_capable(hdev)) + hci_dev_set_flag(hdev, HCI_LE_ENABLED); + } + + return hci_init_stage_sync(hdev, hci_init2); +} + +static int hci_set_event_mask_sync(struct hci_dev *hdev) +{ + /* The second byte is 0xff instead of 0x9f (two reserved bits + * disabled) since a Broadcom 1.2 dongle doesn't respond to the + * command otherwise. + */ + u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; + + /* CSR 1.1 dongles does not accept any bitfield so don't try to set + * any event mask for pre 1.2 devices. + */ + if (hdev->hci_ver < BLUETOOTH_VER_1_2) + return 0; + + if (lmp_bredr_capable(hdev)) { + events[4] |= 0x01; /* Flow Specification Complete */ + + /* Don't set Disconnect Complete when suspended as that + * would wakeup the host when disconnecting due to + * suspend. + */ + if (hdev->suspended) + events[0] &= 0xef; + } else { + /* Use a different default for LE-only devices */ + memset(events, 0, sizeof(events)); + events[1] |= 0x20; /* Command Complete */ + events[1] |= 0x40; /* Command Status */ + events[1] |= 0x80; /* Hardware Error */ + + /* If the controller supports the Disconnect command, enable + * the corresponding event. In addition enable packet flow + * control related events. + */ + if (hdev->commands[0] & 0x20) { + /* Don't set Disconnect Complete when suspended as that + * would wakeup the host when disconnecting due to + * suspend. + */ + if (!hdev->suspended) + events[0] |= 0x10; /* Disconnection Complete */ + events[2] |= 0x04; /* Number of Completed Packets */ + events[3] |= 0x02; /* Data Buffer Overflow */ + } + + /* If the controller supports the Read Remote Version + * Information command, enable the corresponding event. 
+ */ + if (hdev->commands[2] & 0x80) + events[1] |= 0x08; /* Read Remote Version Information + * Complete + */ + + if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { + events[0] |= 0x80; /* Encryption Change */ + events[5] |= 0x80; /* Encryption Key Refresh Complete */ + } + } + + if (lmp_inq_rssi_capable(hdev) || + test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) + events[4] |= 0x02; /* Inquiry Result with RSSI */ + + if (lmp_ext_feat_capable(hdev)) + events[4] |= 0x04; /* Read Remote Extended Features Complete */ + + if (lmp_esco_capable(hdev)) { + events[5] |= 0x08; /* Synchronous Connection Complete */ + events[5] |= 0x10; /* Synchronous Connection Changed */ + } + + if (lmp_sniffsubr_capable(hdev)) + events[5] |= 0x20; /* Sniff Subrating */ + + if (lmp_pause_enc_capable(hdev)) + events[5] |= 0x80; /* Encryption Key Refresh Complete */ + + if (lmp_ext_inq_capable(hdev)) + events[5] |= 0x40; /* Extended Inquiry Result */ + + if (lmp_no_flush_capable(hdev)) + events[7] |= 0x01; /* Enhanced Flush Complete */ + + if (lmp_lsto_capable(hdev)) + events[6] |= 0x80; /* Link Supervision Timeout Changed */ + + if (lmp_ssp_capable(hdev)) { + events[6] |= 0x01; /* IO Capability Request */ + events[6] |= 0x02; /* IO Capability Response */ + events[6] |= 0x04; /* User Confirmation Request */ + events[6] |= 0x08; /* User Passkey Request */ + events[6] |= 0x10; /* Remote OOB Data Request */ + events[6] |= 0x20; /* Simple Pairing Complete */ + events[7] |= 0x04; /* User Passkey Notification */ + events[7] |= 0x08; /* Keypress Notification */ + events[7] |= 0x10; /* Remote Host Supported + * Features Notification + */ + } + + if (lmp_le_capable(hdev)) + events[7] |= 0x20; /* LE Meta-Event */ + + return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK, + sizeof(events), events, HCI_CMD_TIMEOUT); +} + +static int hci_read_stored_link_key_sync(struct hci_dev *hdev) +{ + struct hci_cp_read_stored_link_key cp; + + if (!(hdev->commands[6] & 0x20) || + test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) + return 0; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, BDADDR_ANY); + cp.read_all = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_setup_link_policy_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_def_link_policy cp; + u16 link_policy = 0; + + if (!(hdev->commands[5] & 0x10)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (lmp_rswitch_capable(hdev)) + link_policy |= HCI_LP_RSWITCH; + if (lmp_hold_capable(hdev)) + link_policy |= HCI_LP_HOLD; + if (lmp_sniff_capable(hdev)) + link_policy |= HCI_LP_SNIFF; + if (lmp_park_capable(hdev)) + link_policy |= HCI_LP_PARK; + + cp.policy = cpu_to_le16(link_policy); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[8] & 0x01)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[18] & 0x04) || + test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_read_page_scan_type_sync(struct hci_dev *hdev) +{ + /* Some older Broadcom based Bluetooth 1.2 controllers do not + * support the Read Page Scan Type command. 
Check support for + * this command in the bit mask of supported commands. + */ + if (!(hdev->commands[13] & 0x01)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read features beyond page 1 if available */ +static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev) +{ + u8 page; + int err; + + if (!lmp_ext_feat_capable(hdev)) + return 0; + + for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; + page++) { + err = hci_read_local_ext_features_sync(hdev, page); + if (err) + return err; + } + + return 0; +} + +/* HCI Controller init stage 3 command sequence */ +static const struct hci_init_stage hci_init3[] = { + /* HCI_OP_SET_EVENT_MASK */ + HCI_INIT(hci_set_event_mask_sync), + /* HCI_OP_READ_STORED_LINK_KEY */ + HCI_INIT(hci_read_stored_link_key_sync), + /* HCI_OP_WRITE_DEF_LINK_POLICY */ + HCI_INIT(hci_setup_link_policy_sync), + /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */ + HCI_INIT(hci_read_page_scan_activity_sync), + /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */ + HCI_INIT(hci_read_def_err_data_reporting_sync), + /* HCI_OP_READ_PAGE_SCAN_TYPE */ + HCI_INIT(hci_read_page_scan_type_sync), + /* HCI_OP_READ_LOCAL_EXT_FEATURES */ + HCI_INIT(hci_read_local_ext_features_all_sync), + {} +}; + +static int hci_le_set_event_mask_sync(struct hci_dev *hdev) +{ + u8 events[8]; + + if (!lmp_le_capable(hdev)) + return 0; + + memset(events, 0, sizeof(events)); + + if (hdev->le_features[0] & HCI_LE_ENCRYPTION) + events[0] |= 0x10; /* LE Long Term Key Request */ + + /* If controller supports the Connection Parameters Request + * Link Layer Procedure, enable the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) + /* LE Remote Connection Parameter Request */ + events[0] |= 0x20; + + /* If the controller supports the Data Length Extension + * feature, enable the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) + events[0] |= 0x40; /* LE Data Length Change */ + + /* If the controller supports LL Privacy feature, enable + * the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_LL_PRIVACY) + events[1] |= 0x02; /* LE Enhanced Connection Complete */ + + /* If the controller supports Extended Scanner Filter + * Policies, enable the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) + events[1] |= 0x04; /* LE Direct Advertising Report */ + + /* If the controller supports Channel Selection Algorithm #2 + * feature, enable the corresponding event. + */ + if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) + events[2] |= 0x08; /* LE Channel Selection Algorithm */ + + /* If the controller supports the LE Set Scan Enable command, + * enable the corresponding advertising report event. + */ + if (hdev->commands[26] & 0x08) + events[0] |= 0x02; /* LE Advertising Report */ + + /* If the controller supports the LE Create Connection + * command, enable the corresponding event. + */ + if (hdev->commands[26] & 0x10) + events[0] |= 0x01; /* LE Connection Complete */ + + /* If the controller supports the LE Connection Update + * command, enable the corresponding event. + */ + if (hdev->commands[27] & 0x04) + events[0] |= 0x04; /* LE Connection Update Complete */ + + /* If the controller supports the LE Read Remote Used Features + * command, enable the corresponding event. 
+ */ + if (hdev->commands[27] & 0x20) + /* LE Read Remote Used Features Complete */ + events[0] |= 0x08; + + /* If the controller supports the LE Read Local P-256 + * Public Key command, enable the corresponding event. + */ + if (hdev->commands[34] & 0x02) + /* LE Read Local P-256 Public Key Complete */ + events[0] |= 0x80; + + /* If the controller supports the LE Generate DHKey + * command, enable the corresponding event. + */ + if (hdev->commands[34] & 0x04) + events[1] |= 0x01; /* LE Generate DHKey Complete */ + + /* If the controller supports the LE Set Default PHY or + * LE Set PHY commands, enable the corresponding event. + */ + if (hdev->commands[35] & (0x20 | 0x40)) + events[1] |= 0x08; /* LE PHY Update Complete */ + + /* If the controller supports LE Set Extended Scan Parameters + * and LE Set Extended Scan Enable commands, enable the + * corresponding event. + */ + if (use_ext_scan(hdev)) + events[1] |= 0x10; /* LE Extended Advertising Report */ + + /* If the controller supports the LE Extended Advertising + * command, enable the corresponding event. + */ + if (ext_adv_capable(hdev)) + events[2] |= 0x02; /* LE Advertising Set Terminated */ + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, + sizeof(events), events, HCI_CMD_TIMEOUT); +} + +/* Read LE Advertising Channel TX Power */ +static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) +{ + if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { + /* HCI TS spec forbids mixing of legacy and extended + * advertising commands wherein READ_ADV_TX_POWER is + * also included. So do not call it if extended adv + * is supported otherwise controller will return + * COMMAND_DISALLOWED for extended commands. + */ + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_ADV_TX_POWER, + 0, NULL, HCI_CMD_TIMEOUT); + } + + return 0; +} + +/* Read LE Min/Max Tx Power*/ +static int hci_le_read_tx_power_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[38] & 0x80)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read LE Accept List Size */ +static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[26] & 0x40)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Clear LE Accept List */ +static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[26] & 0x80)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read LE Resolving List Size */ +static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[34] & 0x40)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Clear LE Resolving List */ +static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[34] & 0x20)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Set RPA timeout */ +static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) +{ + __le16 timeout = cpu_to_le16(hdev->rpa_timeout); + + if (!(hdev->commands[35] & 0x04)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, + sizeof(timeout), &timeout, + HCI_CMD_TIMEOUT); +} + +/* Read LE Maximum Data Length */ +static int hci_le_read_max_data_len_sync(struct hci_dev *hdev) +{ + if (!(hdev->le_features[0] & 
HCI_LE_DATA_LEN_EXT)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read LE Suggested Default Data Length */ +static int hci_le_read_def_data_len_sync(struct hci_dev *hdev) +{ + if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read LE Number of Supported Advertising Sets */ +static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev) +{ + if (!ext_adv_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Write LE Host Supported */ +static int hci_set_le_support_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_le_host_supported cp; + + /* LE-only devices do not support explicit enablement */ + if (!lmp_bredr_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + cp.le = 0x01; + cp.simul = 0x00; + } + + if (cp.le == lmp_host_le_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +/* LE Controller init stage 3 command sequence */ +static const struct hci_init_stage le_init3[] = { + /* HCI_OP_LE_SET_EVENT_MASK */ + HCI_INIT(hci_le_set_event_mask_sync), + /* HCI_OP_LE_READ_ADV_TX_POWER */ + HCI_INIT(hci_le_read_adv_tx_power_sync), + /* HCI_OP_LE_READ_TRANSMIT_POWER */ + HCI_INIT(hci_le_read_tx_power_sync), + /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */ + HCI_INIT(hci_le_read_accept_list_size_sync), + /* HCI_OP_LE_CLEAR_ACCEPT_LIST */ + HCI_INIT(hci_le_clear_accept_list_sync), + /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */ + HCI_INIT(hci_le_read_resolv_list_size_sync), + /* HCI_OP_LE_CLEAR_RESOLV_LIST */ + HCI_INIT(hci_le_clear_resolv_list_sync), + /* HCI_OP_LE_SET_RPA_TIMEOUT */ + HCI_INIT(hci_le_set_rpa_timeout_sync), + /* HCI_OP_LE_READ_MAX_DATA_LEN */ + HCI_INIT(hci_le_read_max_data_len_sync), + /* HCI_OP_LE_READ_DEF_DATA_LEN */ + HCI_INIT(hci_le_read_def_data_len_sync), + /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */ + HCI_INIT(hci_le_read_num_support_adv_sets_sync), + /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ + HCI_INIT(hci_set_le_support_sync), + {} +}; + +static int hci_init3_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + err = hci_init_stage_sync(hdev, hci_init3); + if (err) + return err; + + if (lmp_le_capable(hdev)) + return hci_init_stage_sync(hdev, le_init3); + + return 0; +} + +static int hci_delete_stored_link_key_sync(struct hci_dev *hdev) +{ + struct hci_cp_delete_stored_link_key cp; + + /* Some Broadcom based Bluetooth controllers do not support the + * Delete Stored Link Key command. They are clearly indicating its + * absence in the bit mask of supported commands. + * + * Check the supported commands and only if the command is marked + * as supported send it. If not supported assume that the controller + * does not have actual support for stored link keys which makes this + * command redundant anyway. + * + * Some controllers indicate that they support handling deleting + * stored link keys, but they don't. The quirk lets a driver + * just disable this command. 
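The init tables above (hci_init3[], le_init3[]) are {}-terminated arrays of callbacks walked by hci_init_stage_sync(), presumably stopping at the first failing entry as the error handling in hci_init3_sync() suggests. A self-contained sketch of that table-driven pattern (generic names, not the kernel's):

struct init_stage {
        int (*func)(void *ctx);        /* one HCI command per entry */
};

/* Run a NULL-terminated table of init steps, aborting on the first error. */
static int run_init_stages(const struct init_stage *stage, void *ctx)
{
        int err;

        for (; stage->func; stage++) {
                err = stage->func(ctx);
                if (err)
                        return err;
        }

        return 0;
}

Adding a new optional command then becomes a one-line table entry, with the ordering explicit in the array.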
+ */ + if (!(hdev->commands[6] & 0x80) || + test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) + return 0; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, BDADDR_ANY); + cp.delete_all = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) +{ + u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + bool changed = false; + + /* Set event mask page 2 if the HCI command for it is supported */ + if (!(hdev->commands[22] & 0x04)) + return 0; + + /* If Connectionless Peripheral Broadcast central role is supported + * enable all necessary events for it. + */ + if (lmp_cpb_central_capable(hdev)) { + events[1] |= 0x40; /* Triggered Clock Capture */ + events[1] |= 0x80; /* Synchronization Train Complete */ + events[2] |= 0x10; /* Peripheral Page Response Timeout */ + events[2] |= 0x20; /* CPB Channel Map Change */ + changed = true; + } + + /* If Connectionless Peripheral Broadcast peripheral role is supported + * enable all necessary events for it. + */ + if (lmp_cpb_peripheral_capable(hdev)) { + events[2] |= 0x01; /* Synchronization Train Received */ + events[2] |= 0x02; /* CPB Receive */ + events[2] |= 0x04; /* CPB Timeout */ + events[2] |= 0x08; /* Truncated Page Complete */ + changed = true; + } + + /* Enable Authenticated Payload Timeout Expired event if supported */ + if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { + events[2] |= 0x80; + changed = true; + } + + /* Some Broadcom based controllers indicate support for Set Event + * Mask Page 2 command, but then actually do not support it. Since + * the default value is all bits set to zero, the command is only + * required if the event mask has to be changed. In case no change + * to the event mask is needed, skip this command. 
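The "is the mask still all zero?" question answered by the changed flag just below could also be asked of the raw bytes; a tiny illustrative equivalent (not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Page 2 of the event mask defaults to all zeroes, so the command only
 * needs to be sent if at least one bit was set.
 */
static bool event_mask_page2_needed(const uint8_t events[8])
{
        static const uint8_t all_zero[8];

        return memcmp(events, all_zero, sizeof(all_zero)) != 0;
}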
+ */ + if (!changed) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2, + sizeof(events), events, HCI_CMD_TIMEOUT); +} + +/* Read local codec list if the HCI command is supported */ +static int hci_read_local_codecs_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[29] & 0x20)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read local pairing options if the HCI command is supported */ +static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[41] & 0x08)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Get MWS transport configuration if the HCI command is supported */ +static int hci_get_mws_transport_config_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[30] & 0x08)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Check for Synchronization Train support */ +static int hci_read_sync_train_params_sync(struct hci_dev *hdev) +{ + if (!lmp_sync_train_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Enable Secure Connections if supported and configured */ +static int hci_write_sc_support_1_sync(struct hci_dev *hdev) +{ + u8 support = 0x01; + + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || + !bredr_sc_enabled(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, + sizeof(support), &support, + HCI_CMD_TIMEOUT); +} + +/* Set erroneous data reporting if supported to the wideband speech + * setting value + */ +static int hci_set_err_data_report_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_def_err_data_reporting cp; + bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); + + if (!(hdev->commands[18] & 0x08) || + test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + return 0; + + if (enabled == hdev->err_data_reporting) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.err_data_reporting = enabled ? 
ERR_DATA_REPORTING_ENABLED : + ERR_DATA_REPORTING_DISABLED; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static const struct hci_init_stage hci_init4[] = { + /* HCI_OP_DELETE_STORED_LINK_KEY */ + HCI_INIT(hci_delete_stored_link_key_sync), + /* HCI_OP_SET_EVENT_MASK_PAGE_2 */ + HCI_INIT(hci_set_event_mask_page_2_sync), + /* HCI_OP_READ_LOCAL_CODECS */ + HCI_INIT(hci_read_local_codecs_sync), + /* HCI_OP_READ_LOCAL_PAIRING_OPTS */ + HCI_INIT(hci_read_local_pairing_opts_sync), + /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */ + HCI_INIT(hci_get_mws_transport_config_sync), + /* HCI_OP_READ_SYNC_TRAIN_PARAMS */ + HCI_INIT(hci_read_sync_train_params_sync), + /* HCI_OP_WRITE_SC_SUPPORT */ + HCI_INIT(hci_write_sc_support_1_sync), + /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */ + HCI_INIT(hci_set_err_data_report_sync), + {} +}; + +/* Set Suggested Default Data Length to maximum if supported */ +static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev) +{ + struct hci_cp_le_write_def_data_len cp; + + if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); + cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +/* Set Default PHY parameters if command is supported */ +static int hci_le_set_default_phy_sync(struct hci_dev *hdev) +{ + struct hci_cp_le_set_default_phy cp; + + if (!(hdev->commands[35] & 0x20)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.all_phys = 0x00; + cp.tx_phys = hdev->le_tx_def_phys; + cp.rx_phys = hdev->le_rx_def_phys; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static const struct hci_init_stage le_init4[] = { + /* HCI_OP_LE_WRITE_DEF_DATA_LEN */ + HCI_INIT(hci_le_set_write_def_data_len_sync), + /* HCI_OP_LE_SET_DEFAULT_PHY */ + HCI_INIT(hci_le_set_default_phy_sync), + {} +}; + +static int hci_init4_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + err = hci_init_stage_sync(hdev, hci_init4); + if (err) + return err; + + if (lmp_le_capable(hdev)) + return hci_init_stage_sync(hdev, le_init4); + + return 0; +} + +static int hci_init_sync(struct hci_dev *hdev) +{ + int err; + + err = hci_init1_sync(hdev); + if (err < 0) + return err; + + if (hci_dev_test_flag(hdev, HCI_SETUP)) + hci_debugfs_create_basic(hdev); + + err = hci_init2_sync(hdev); + if (err < 0) + return err; + + /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode + * BR/EDR/LE type controllers. AMP controllers only need the + * first two stages of init. + */ + if (hdev->dev_type != HCI_PRIMARY) + return 0; + + err = hci_init3_sync(hdev); + if (err < 0) + return err; + + err = hci_init4_sync(hdev); + if (err < 0) + return err; + + /* This function is only called when the controller is actually in + * configured state. When the controller is marked as unconfigured, + * this initialization procedure is not run. + * + * It means that it is possible that a controller runs through its + * setup phase and then discovers missing settings. If that is the + * case, then this function will not be called. It then will only + * be called during the config phase. + * + * So only when in setup phase or config phase, create the debugfs + * entries and register the SMP channels. 
+ */ + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) + return 0; + + hci_debugfs_create_common(hdev); + + if (lmp_bredr_capable(hdev)) + hci_debugfs_create_bredr(hdev); + + if (lmp_le_capable(hdev)) + hci_debugfs_create_le(hdev); + + return 0; +} + +int hci_dev_open_sync(struct hci_dev *hdev) +{ + int ret = 0; + + bt_dev_dbg(hdev, ""); + + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + ret = -ENODEV; + goto done; + } + + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { + /* Check for rfkill but allow the HCI setup stage to + * proceed (which in itself doesn't cause any RF activity). + */ + if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { + ret = -ERFKILL; + goto done; + } + + /* Check for valid public address or a configured static + * random address, but let the HCI setup proceed to + * be able to determine if there is a public address + * or not. + * + * In case of user channel usage, it is not important + * if a public address or static random address is + * available. + * + * This check is only valid for BR/EDR controllers + * since AMP controllers do not have an address. + */ + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hdev->dev_type == HCI_PRIMARY && + !bacmp(&hdev->bdaddr, BDADDR_ANY) && + !bacmp(&hdev->static_addr, BDADDR_ANY)) { + ret = -EADDRNOTAVAIL; + goto done; + } + } + + if (test_bit(HCI_UP, &hdev->flags)) { + ret = -EALREADY; + goto done; + } + + if (hdev->open(hdev)) { + ret = -EIO; + goto done; + } + + set_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_OPEN); + + atomic_set(&hdev->cmd_cnt, 1); + set_bit(HCI_INIT, &hdev->flags); + + if (hci_dev_test_flag(hdev, HCI_SETUP) || + test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { + bool invalid_bdaddr; + + hci_sock_dev_event(hdev, HCI_DEV_SETUP); + + if (hdev->setup) + ret = hdev->setup(hdev); + + /* The transport driver can set the quirk to mark the + * BD_ADDR invalid before creating the HCI device or in + * its setup callback. + */ + invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, + &hdev->quirks); + + if (ret) + goto setup_failed; + + if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) { + if (!bacmp(&hdev->public_addr, BDADDR_ANY)) + hci_dev_get_bd_addr_from_property(hdev); + + if (bacmp(&hdev->public_addr, BDADDR_ANY) && + hdev->set_bdaddr) { + ret = hdev->set_bdaddr(hdev, + &hdev->public_addr); + + /* If setting of the BD_ADDR from the device + * property succeeds, then treat the address + * as valid even if the invalid BD_ADDR + * quirk indicates otherwise. + */ + if (!ret) + invalid_bdaddr = false; + } + } + +setup_failed: + /* The transport driver can set these quirks before + * creating the HCI device or in its setup callback. + * + * For the invalid BD_ADDR quirk it is possible that + * it becomes a valid address if the bootloader does + * provide it (see above). + * + * In case any of them is set, the controller has to + * start up as unconfigured. + */ + if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || + invalid_bdaddr) + hci_dev_set_flag(hdev, HCI_UNCONFIGURED); + + /* For an unconfigured controller it is required to + * read at least the version information provided by + * the Read Local Version Information command. + * + * If the set_bdaddr driver callback is provided, then + * also the original Bluetooth public device address + * will be read using the Read BD Address command. 
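A note on the address tests above: bacmp() is a memcmp() over the 6-byte bdaddr_t and BDADDR_ANY is the all-zero address, so !bacmp(addr, BDADDR_ANY) reads as "no address configured". A standalone sketch of that idiom (the type mirrors the kernel layout, names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint8_t b[6]; } bdaddr_t;      /* mirrors the kernel layout */

static const bdaddr_t any_addr;                 /* all zeroes, like BDADDR_ANY */

/* Same test as !bacmp(&hdev->bdaddr, BDADDR_ANY) in hci_dev_open_sync() */
static bool bdaddr_is_unset(const bdaddr_t *a)
{
        return memcmp(a, &any_addr, sizeof(*a)) == 0;
}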
+ */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + ret = hci_unconf_init_sync(hdev); + } + + if (hci_dev_test_flag(hdev, HCI_CONFIG)) { + /* If public address change is configured, ensure that + * the address gets programmed. If the driver does not + * support changing the public address, fail the power + * on procedure. + */ + if (bacmp(&hdev->public_addr, BDADDR_ANY) && + hdev->set_bdaddr) + ret = hdev->set_bdaddr(hdev, &hdev->public_addr); + else + ret = -EADDRNOTAVAIL; + } + + if (!ret) { + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + ret = hci_init_sync(hdev); + if (!ret && hdev->post_init) + ret = hdev->post_init(hdev); + } + } + + /* If the HCI Reset command is clearing all diagnostic settings, + * then they need to be reprogrammed after the init procedure + * completed. + */ + if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) + ret = hdev->set_diag(hdev, true); + + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + msft_do_open(hdev); + aosp_do_open(hdev); + } + + clear_bit(HCI_INIT, &hdev->flags); + + if (!ret) { + hci_dev_hold(hdev); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + hci_adv_instances_set_rpa_expired(hdev, true); + set_bit(HCI_UP, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_UP); + hci_leds_update_powered(hdev, true); + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG) && + !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_dev_test_flag(hdev, HCI_MGMT) && + hdev->dev_type == HCI_PRIMARY) { + ret = hci_powered_update_sync(hdev); + } + } else { + /* Init failed, cleanup */ + flush_work(&hdev->tx_work); + + /* Since hci_rx_work() is possible to awake new cmd_work + * it should be flushed first to avoid unexpected call of + * hci_cmd_work() + */ + flush_work(&hdev->rx_work); + flush_work(&hdev->cmd_work); + + skb_queue_purge(&hdev->cmd_q); + skb_queue_purge(&hdev->rx_q); + + if (hdev->flush) + hdev->flush(hdev); + + if (hdev->sent_cmd) { + kfree_skb(hdev->sent_cmd); + hdev->sent_cmd = NULL; + } + + clear_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_CLOSE); + + hdev->close(hdev); + hdev->flags &= BIT(HCI_RAW); + } + +done: + return ret; +} + +/* This function requires the caller holds hdev->lock */ +static void hci_pend_le_actions_clear(struct hci_dev *hdev) +{ + struct hci_conn_params *p; + + list_for_each_entry(p, &hdev->le_conn_params, list) { + if (p->conn) { + hci_conn_drop(p->conn); + hci_conn_put(p->conn); + p->conn = NULL; + } + list_del_init(&p->action); + } + + BT_DBG("All LE pending actions cleared"); +} + +int hci_dev_close_sync(struct hci_dev *hdev) +{ + bool auto_off; + int err = 0; + + bt_dev_dbg(hdev, ""); + + cancel_delayed_work(&hdev->power_off); + cancel_delayed_work(&hdev->ncmd_timer); + + hci_request_cancel_all(hdev); + + if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + test_bit(HCI_UP, &hdev->flags)) { + /* Execute vendor specific shutdown routine */ + if (hdev->shutdown) + err = hdev->shutdown(hdev); + } + + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { + cancel_delayed_work_sync(&hdev->cmd_timer); + return err; + } + + hci_leds_update_powered(hdev, false); + + /* Flush RX and TX works */ + flush_work(&hdev->tx_work); + flush_work(&hdev->rx_work); + + if (hdev->discov_timeout > 0) { + hdev->discov_timeout = 0; + 
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + } + + if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) + cancel_delayed_work(&hdev->service_cache); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) { + struct adv_info *adv_instance; + + cancel_delayed_work_sync(&hdev->rpa_expired); + + list_for_each_entry(adv_instance, &hdev->adv_instances, list) + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); + } + + /* Avoid potential lockdep warnings from the *_flush() calls by + * ensuring the workqueue is empty up front. + */ + drain_workqueue(hdev->workqueue); + + hci_dev_lock(hdev); + + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + + auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); + + if (!auto_off && hdev->dev_type == HCI_PRIMARY && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_dev_test_flag(hdev, HCI_MGMT)) + __mgmt_power_off(hdev); + + hci_inquiry_cache_flush(hdev); + hci_pend_le_actions_clear(hdev); + hci_conn_hash_flush(hdev); + hci_dev_unlock(hdev); + + smp_unregister(hdev); + + hci_sock_dev_event(hdev, HCI_DEV_DOWN); + + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + aosp_do_close(hdev); + msft_do_close(hdev); + } + + if (hdev->flush) + hdev->flush(hdev); + + /* Reset device */ + skb_queue_purge(&hdev->cmd_q); + atomic_set(&hdev->cmd_cnt, 1); + if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && + !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + set_bit(HCI_INIT, &hdev->flags); + hci_reset_sync(hdev); + clear_bit(HCI_INIT, &hdev->flags); + } + + /* flush cmd work */ + flush_work(&hdev->cmd_work); + + /* Drop queues */ + skb_queue_purge(&hdev->rx_q); + skb_queue_purge(&hdev->cmd_q); + skb_queue_purge(&hdev->raw_q); + + /* Drop last sent command */ + if (hdev->sent_cmd) { + cancel_delayed_work_sync(&hdev->cmd_timer); + kfree_skb(hdev->sent_cmd); + hdev->sent_cmd = NULL; + } + + clear_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_CLOSE); + + /* After this point our queues are empty and no tasks are scheduled. */ + hdev->close(hdev); + + /* Clear flags */ + hdev->flags &= BIT(HCI_RAW); + hci_dev_clear_volatile_flags(hdev); + + /* Controller radio is available but is currently powered down */ + hdev->amp_status = AMP_STATUS_POWERED_DOWN; + + memset(hdev->eir, 0, sizeof(hdev->eir)); + memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); + bacpy(&hdev->random_addr, BDADDR_ANY); + + hci_dev_put(hdev); + return err; +} + +/* This function perform power on HCI command sequence as follows: + * + * If controller is already up (HCI_UP) performs hci_powered_update_sync + * sequence otherwise run hci_dev_open_sync which will follow with + * hci_powered_update_sync after the init sequence is completed. + */ +static int hci_power_on_sync(struct hci_dev *hdev) +{ + int err; + + if (test_bit(HCI_UP, &hdev->flags) && + hci_dev_test_flag(hdev, HCI_MGMT) && + hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { + cancel_delayed_work(&hdev->power_off); + return hci_powered_update_sync(hdev); + } + + err = hci_dev_open_sync(hdev); + if (err < 0) + return err; + + /* During the HCI setup phase, a few error conditions are + * ignored and they need to be checked now. If they are still + * valid, it is important to return the device back off. 
+ */ + if (hci_dev_test_flag(hdev, HCI_RFKILLED) || + hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || + (hdev->dev_type == HCI_PRIMARY && + !bacmp(&hdev->bdaddr, BDADDR_ANY) && + !bacmp(&hdev->static_addr, BDADDR_ANY))) { + hci_dev_clear_flag(hdev, HCI_AUTO_OFF); + hci_dev_close_sync(hdev); + } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { + queue_delayed_work(hdev->req_workqueue, &hdev->power_off, + HCI_AUTO_OFF_TIMEOUT); + } + + if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { + /* For unconfigured devices, set the HCI_RAW flag + * so that userspace can easily identify them. + */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + set_bit(HCI_RAW, &hdev->flags); + + /* For fully configured devices, this will send + * the Index Added event. For unconfigured devices, + * it will send Unconfigued Index Added event. + * + * Devices with HCI_QUIRK_RAW_DEVICE are ignored + * and no event will be send. + */ + mgmt_index_added(hdev); + } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { + /* When the controller is now configured, then it + * is important to clear the HCI_RAW flag. + */ + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + clear_bit(HCI_RAW, &hdev->flags); + + /* Powering on the controller with HCI_CONFIG set only + * happens with the transition from unconfigured to + * configured. This will send the Index Added event. + */ + mgmt_index_added(hdev); + } + + return 0; +} + +static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr) +{ + struct hci_cp_remote_name_req_cancel cp; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, addr); + + return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_stop_discovery_sync(struct hci_dev *hdev) +{ + struct discovery_state *d = &hdev->discovery; + struct inquiry_entry *e; + int err; + + bt_dev_dbg(hdev, "state %u", hdev->discovery.state); + + if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { + if (test_bit(HCI_INQUIRY, &hdev->flags)) { + err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, + 0, NULL, HCI_CMD_TIMEOUT); + if (err) + return err; + } + + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { + cancel_delayed_work(&hdev->le_scan_disable); + cancel_delayed_work(&hdev->le_scan_restart); + + err = hci_scan_disable_sync(hdev); + if (err) + return err; + } + + } else { + err = hci_scan_disable_sync(hdev); + if (err) + return err; + } + + /* Resume advertising if it was paused */ + if (use_ll_privacy(hdev)) + hci_resume_advertising_sync(hdev); + + /* No further actions needed for LE-only discovery */ + if (d->type == DISCOV_TYPE_LE) + return 0; + + if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { + e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, + NAME_PENDING); + if (!e) + return 0; + + return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); + } + + return 0; +} + +static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle, + u8 reason) +{ + struct hci_cp_disconn_phy_link cp; + + memset(&cp, 0, sizeof(cp)); + cp.phy_handle = HCI_PHY_HANDLE(handle); + cp.reason = reason; + + return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + struct hci_cp_disconnect cp; + + if (conn->type == AMP_LINK) + return hci_disconnect_phy_link_sync(hdev, conn->handle, reason); + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.reason = reason; + 
+ /* Wait for HCI_EV_DISCONN_COMPLETE not HCI_EV_CMD_STATUS when not + * suspending. + */ + if (!hdev->suspended) + return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT, + sizeof(cp), &cp, + HCI_EV_DISCONN_COMPLETE, + HCI_CMD_TIMEOUT, NULL); + + return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +static int hci_le_connect_cancel_sync(struct hci_dev *hdev, + struct hci_conn *conn) +{ + if (test_bit(HCI_CONN_SCANNING, &conn->flags)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, + 6, &conn->dst, HCI_CMD_TIMEOUT); +} + +static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + if (conn->type == LE_LINK) + return hci_le_connect_cancel_sync(hdev, conn); + + if (hdev->hci_ver < BLUETOOTH_VER_1_2) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL, + 6, &conn->dst, HCI_CMD_TIMEOUT); +} + +static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + struct hci_cp_reject_sync_conn_req cp; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.reason = reason; + + /* SCO rejection has its own limited set of + * allowed error values (0x0D-0x0F). + */ + if (reason < 0x0d || reason > 0x0f) + cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; + + return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + struct hci_cp_reject_conn_req cp; + + if (conn->type == SCO_LINK || conn->type == ESCO_LINK) + return hci_reject_sco_sync(hdev, conn, reason); + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.reason = reason; + + return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + switch (conn->state) { + case BT_CONNECTED: + case BT_CONFIG: + return hci_disconnect_sync(hdev, conn, reason); + case BT_CONNECT: + return hci_connect_cancel_sync(hdev, conn); + case BT_CONNECT2: + return hci_reject_conn_sync(hdev, conn, reason); + default: + conn->state = BT_CLOSED; + break; + } + + return 0; +} + +static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason) +{ + struct hci_conn *conn, *tmp; + int err; + + list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { + err = hci_abort_conn_sync(hdev, conn, reason); + if (err) + return err; + } + + return err; +} + +/* This function perform power off HCI command sequence as follows: + * + * Clear Advertising + * Stop Discovery + * Disconnect all connections + * hci_dev_close_sync + */ +static int hci_power_off_sync(struct hci_dev *hdev) +{ + int err; + + /* If controller is already down there is nothing to do */ + if (!test_bit(HCI_UP, &hdev->flags)) + return 0; + + if (test_bit(HCI_ISCAN, &hdev->flags) || + test_bit(HCI_PSCAN, &hdev->flags)) { + err = hci_write_scan_enable_sync(hdev, 0x00); + if (err) + return err; + } + + err = hci_clear_adv_sync(hdev, NULL, false); + if (err) + return err; + + err = hci_stop_discovery_sync(hdev); + if (err) + return err; + + /* Terminated due to Power Off */ + err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); + if (err) + return err; + + return hci_dev_close_sync(hdev); +} + +int hci_set_powered_sync(struct hci_dev *hdev, u8 val) +{ + if (val) + return hci_power_on_sync(hdev); + + return hci_power_off_sync(hdev); +} + +static int 
hci_write_iac_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_current_iac_lap cp; + + if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { + /* Limited discoverable mode */ + cp.num_iac = min_t(u8, hdev->num_iac, 2); + cp.iac_lap[0] = 0x00; /* LIAC */ + cp.iac_lap[1] = 0x8b; + cp.iac_lap[2] = 0x9e; + cp.iac_lap[3] = 0x33; /* GIAC */ + cp.iac_lap[4] = 0x8b; + cp.iac_lap[5] = 0x9e; + } else { + /* General discoverable mode */ + cp.num_iac = 1; + cp.iac_lap[0] = 0x33; /* GIAC */ + cp.iac_lap[1] = 0x8b; + cp.iac_lap[2] = 0x9e; + } + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP, + (cp.num_iac * 3) + 1, &cp, + HCI_CMD_TIMEOUT); +} + +int hci_update_discoverable_sync(struct hci_dev *hdev) +{ + int err = 0; + + if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { + err = hci_write_iac_sync(hdev); + if (err) + return err; + + err = hci_update_scan_sync(hdev); + if (err) + return err; + + err = hci_update_class_sync(hdev); + if (err) + return err; + } + + /* Advertising instances don't use the global discoverable setting, so + * only update AD if advertising was enabled using Set Advertising. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { + err = hci_update_adv_data_sync(hdev, 0x00); + if (err) + return err; + + /* Discoverable mode affects the local advertising + * address in limited privacy mode. + */ + if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { + if (ext_adv_capable(hdev)) + err = hci_start_ext_adv_sync(hdev, 0x00); + else + err = hci_enable_advertising_sync(hdev); + } + } + + return err; +} + +static int update_discoverable_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_discoverable_sync(hdev); +} + +int hci_update_discoverable(struct hci_dev *hdev) +{ + /* Only queue if it would have any effect */ + if (hdev_is_powered(hdev) && + hci_dev_test_flag(hdev, HCI_ADVERTISING) && + hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && + hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) + return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL, + NULL); + + return 0; +} + +int hci_update_connectable_sync(struct hci_dev *hdev) +{ + int err; + + err = hci_update_scan_sync(hdev); + if (err) + return err; + + /* If BR/EDR is not enabled and we disable advertising as a + * by-product of disabling connectable, we need to update the + * advertising flags. 
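The IAC bytes that hci_write_iac_sync() above fills by hand are the 24-bit LAPs 0x9E8B33 (GIAC) and 0x9E8B00 (LIAC) stored least-significant byte first, which is why the GIAC/LIAC comments sit on the low byte. A small sketch of that packing (hypothetical helper):

#include <stdint.h>

/* Store a 24-bit inquiry access code LAP little-endian:
 * GIAC 0x9E8B33 -> 33 8b 9e, LIAC 0x9E8B00 -> 00 8b 9e.
 */
static void put_lap_le(uint8_t dst[3], uint32_t lap)
{
        dst[0] = lap & 0xff;
        dst[1] = (lap >> 8) & 0xff;
        dst[2] = (lap >> 16) & 0xff;
}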
+ */ + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); + + /* Update the advertising parameters if necessary */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || + !list_empty(&hdev->adv_instances)) { + if (ext_adv_capable(hdev)) + err = hci_start_ext_adv_sync(hdev, + hdev->cur_adv_instance); + else + err = hci_enable_advertising_sync(hdev); + + if (err) + return err; + } + + return hci_update_passive_scan_sync(hdev); +} + +static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) +{ + const u8 giac[3] = { 0x33, 0x8b, 0x9e }; + const u8 liac[3] = { 0x00, 0x8b, 0x9e }; + struct hci_cp_inquiry cp; + + bt_dev_dbg(hdev, ""); + + if (hci_dev_test_flag(hdev, HCI_INQUIRY)) + return 0; + + hci_dev_lock(hdev); + hci_inquiry_cache_flush(hdev); + hci_dev_unlock(hdev); + + memset(&cp, 0, sizeof(cp)); + + if (hdev->discovery.limited) + memcpy(&cp.lap, liac, sizeof(cp.lap)); + else + memcpy(&cp.lap, giac, sizeof(cp.lap)); + + cp.length = length; + + return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval) +{ + u8 own_addr_type; + /* Accept list is not used for discovery */ + u8 filter_policy = 0x00; + /* Default is to enable duplicates filter */ + u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; + int err; + + bt_dev_dbg(hdev, ""); + + /* If controller is scanning, it means the passive scanning is + * running. Thus, we should temporarily stop it in order to set the + * discovery scanning parameters. + */ + err = hci_scan_disable_sync(hdev); + if (err) { + bt_dev_err(hdev, "Unable to disable scanning: %d", err); + return err; + } + + cancel_interleave_scan(hdev); + + /* Pause advertising since active scanning disables address resolution + * which advertising depend on in order to generate its RPAs. + */ + if (use_ll_privacy(hdev)) { + err = hci_pause_advertising_sync(hdev); + if (err) { + bt_dev_err(hdev, "pause advertising failed: %d", err); + goto failed; + } + } + + /* Disable address resolution while doing active scanning since the + * accept list shall not be used and all reports shall reach the host + * anyway. + */ + err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); + if (err) { + bt_dev_err(hdev, "Unable to disable Address Resolution: %d", + err); + goto failed; + } + + /* All active scans will be done with either a resolvable private + * address (when privacy feature has been enabled) or non-resolvable + * private address. + */ + err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev), + &own_addr_type); + if (err < 0) + own_addr_type = ADDR_LE_DEV_PUBLIC; + + if (hci_is_adv_monitoring(hdev)) { + /* Duplicate filter should be disabled when some advertisement + * monitor is activated, otherwise AdvMon can only receive one + * advertisement for one peer(*) during active scanning, and + * might report loss to these peers. + * + * Note that different controllers have different meanings of + * |duplicate|. Some of them consider packets with the same + * address as duplicate, and others consider packets with the + * same address and the same RSSI as duplicate. Although in the + * latter case we don't need to disable duplicate filter, but + * it is common to have active scanning for a short period of + * time, the power impact should be neglectable. 
+ */ + filter_dup = LE_SCAN_FILTER_DUP_DISABLE; + } + + err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval, + hdev->le_scan_window_discovery, + own_addr_type, filter_policy, filter_dup); + if (!err) + return err; + +failed: + /* Resume advertising if it was paused */ + if (use_ll_privacy(hdev)) + hci_resume_advertising_sync(hdev); + + /* Resume passive scanning */ + hci_update_passive_scan_sync(hdev); + return err; +} + +static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2); + if (err) + return err; + + return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); +} + +int hci_start_discovery_sync(struct hci_dev *hdev) +{ + unsigned long timeout; + int err; + + bt_dev_dbg(hdev, "type %u", hdev->discovery.type); + + switch (hdev->discovery.type) { + case DISCOV_TYPE_BREDR: + return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); + case DISCOV_TYPE_INTERLEAVED: + /* When running simultaneous discovery, the LE scanning time + * should occupy the whole discovery time sine BR/EDR inquiry + * and LE scanning are scheduled by the controller. + * + * For interleaving discovery in comparison, BR/EDR inquiry + * and LE scanning are done sequentially with separate + * timeouts. + */ + if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, + &hdev->quirks)) { + timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); + /* During simultaneous discovery, we double LE scan + * interval. We must leave some time for the controller + * to do BR/EDR inquiry. + */ + err = hci_start_interleaved_discovery_sync(hdev); + break; + } + + timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); + err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); + break; + case DISCOV_TYPE_LE: + timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); + err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); + break; + default: + return -EINVAL; + } + + if (err) + return err; + + bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); + + /* When service discovery is used and the controller has a + * strict duplicate filter, it is important to remember the + * start and duration of the scan. This is required for + * restarting scanning during the discovery phase. 
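The scan_start/scan_duration bookkeeping below exists so a later scan restart can re-arm the disable timer with only the time that is left; roughly this arithmetic (a sketch under that assumption, not the kernel's code):

#include <stdint.h>

typedef uint64_t jiffies_t;

/* Remaining scan time given the saved start and total duration. */
static jiffies_t scan_time_remaining(jiffies_t now, jiffies_t start,
                                     jiffies_t duration)
{
        jiffies_t elapsed = now - start;

        return elapsed >= duration ? 0 : duration - elapsed;
}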
+ */ + if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && + hdev->discovery.result_filtering) { + hdev->discovery.scan_start = jiffies; + hdev->discovery.scan_duration = timeout; + } + + queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, + timeout); + return 0; +} + +static void hci_suspend_monitor_sync(struct hci_dev *hdev) +{ + switch (hci_get_adv_monitor_offload_ext(hdev)) { + case HCI_ADV_MONITOR_EXT_MSFT: + msft_suspend_sync(hdev); + break; + default: + return; + } +} + +/* This function disables discovery and mark it as paused */ +static int hci_pause_discovery_sync(struct hci_dev *hdev) +{ + int old_state = hdev->discovery.state; + int err; + + /* If discovery already stopped/stopping/paused there nothing to do */ + if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING || + hdev->discovery_paused) + return 0; + + hci_discovery_set_state(hdev, DISCOVERY_STOPPING); + err = hci_stop_discovery_sync(hdev); + if (err) + return err; + + hdev->discovery_paused = true; + hdev->discovery_old_state = old_state; + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + + return 0; +} + +static int hci_update_event_filter_sync(struct hci_dev *hdev) +{ + struct bdaddr_list_with_flags *b; + u8 scan = SCAN_DISABLED; + bool scanning = test_bit(HCI_PSCAN, &hdev->flags); + int err; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + /* Always clear event filter when starting */ + hci_clear_event_filter_sync(hdev); + + list_for_each_entry(b, &hdev->accept_list, list) { + if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, + b->current_flags)) + continue; + + bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); + + err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP, + HCI_CONN_SETUP_ALLOW_BDADDR, + &b->bdaddr, + HCI_CONN_SETUP_AUTO_ON); + if (err) + bt_dev_dbg(hdev, "Failed to set event filter for %pMR", + &b->bdaddr); + else + scan = SCAN_PAGE; + } + + if (scan && !scanning) + hci_write_scan_enable_sync(hdev, scan); + else if (!scan && scanning) + hci_write_scan_enable_sync(hdev, scan); + + return 0; +} + +/* This function performs the HCI suspend procedures in the follow order: + * + * Pause discovery (active scanning/inquiry) + * Pause Directed Advertising/Advertising + * Disconnect all connections + * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup + * otherwise: + * Update event mask (only set events that are allowed to wake up the host) + * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP) + * Update passive scanning (lower duty cycle) + * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE + */ +int hci_suspend_sync(struct hci_dev *hdev) +{ + int err; + + /* If marked as suspended there nothing to do */ + if (hdev->suspended) + return 0; + + /* Mark device as suspended */ + hdev->suspended = true; + + /* Pause discovery if not already stopped */ + hci_pause_discovery_sync(hdev); + + /* Pause other advertisements */ + hci_pause_advertising_sync(hdev); + + /* Disable page scan if enabled */ + if (test_bit(HCI_PSCAN, &hdev->flags)) + hci_write_scan_enable_sync(hdev, SCAN_DISABLED); + + /* Suspend monitor filters */ + hci_suspend_monitor_sync(hdev); + + /* Prevent disconnects from causing scanning to be re-enabled */ + hdev->scanning_paused = true; + + /* Soft disconnect everything (power off) */ + err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); + if (err) { + /* Set state to BT_RUNNING so resume doesn't notify */ + hdev->suspend_state = BT_RUNNING; + 
hci_resume_sync(hdev); + return err; + } + + /* Only configure accept list if disconnect succeeded and wake + * isn't being prevented. + */ + if (!hdev->wakeup || !hdev->wakeup(hdev)) { + hdev->suspend_state = BT_SUSPEND_DISCONNECT; + return 0; + } + + /* Unpause to take care of updating scanning params */ + hdev->scanning_paused = false; + + /* Update event mask so only the allowed event can wakeup the host */ + hci_set_event_mask_sync(hdev); + + /* Enable event filter for paired devices */ + hci_update_event_filter_sync(hdev); + + /* Update LE passive scan if enabled */ + hci_update_passive_scan_sync(hdev); + + /* Pause scan changes again. */ + hdev->scanning_paused = true; + + hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE; + + return 0; +} + +/* This function resumes discovery */ +static int hci_resume_discovery_sync(struct hci_dev *hdev) +{ + int err; + + /* If discovery not paused there nothing to do */ + if (!hdev->discovery_paused) + return 0; + + hdev->discovery_paused = false; + + hci_discovery_set_state(hdev, DISCOVERY_STARTING); + + err = hci_start_discovery_sync(hdev); + + hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED : + DISCOVERY_FINDING); + + return err; +} + +static void hci_resume_monitor_sync(struct hci_dev *hdev) +{ + switch (hci_get_adv_monitor_offload_ext(hdev)) { + case HCI_ADV_MONITOR_EXT_MSFT: + msft_resume_sync(hdev); + break; + default: + return; + } +} + +/* This function performs the HCI suspend procedures in the follow order: + * + * Restore event mask + * Clear event filter + * Update passive scanning (normal duty cycle) + * Resume Directed Advertising/Advertising + * Resume discovery (active scanning/inquiry) + */ +int hci_resume_sync(struct hci_dev *hdev) +{ + /* If not marked as suspended there nothing to do */ + if (!hdev->suspended) + return 0; + + hdev->suspended = false; + hdev->scanning_paused = false; + + /* Restore event mask */ + hci_set_event_mask_sync(hdev); + + /* Clear any event filters and restore scan state */ + hci_clear_event_filter_sync(hdev); + hci_update_scan_sync(hdev); + + /* Reset passive scanning to normal */ + hci_update_passive_scan_sync(hdev); + + /* Resume monitor filters */ + hci_resume_monitor_sync(hdev); + + /* Resume other advertisements */ + hci_resume_advertising_sync(hdev); + + /* Resume discovery */ + hci_resume_discovery_sync(hdev); + + return 0; +} diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 7827639ecf5c..4e3e0451b08c 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -86,6 +86,8 @@ static void bt_host_release(struct device *dev) if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) hci_release_dev(hdev); + else + kfree(hdev); module_put(THIS_MODULE); } diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 160c016a5dfb..4574c5cb1b59 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -172,6 +172,21 @@ done: return err; } +static void l2cap_sock_init_pid(struct sock *sk) +{ + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + + /* Only L2CAP_MODE_EXT_FLOWCTL ever need to access the PID in order to + * group the channels being requested. 
+ */ + if (chan->mode != L2CAP_MODE_EXT_FLOWCTL) + return; + + spin_lock(&sk->sk_peer_lock); + sk->sk_peer_pid = get_pid(task_tgid(current)); + spin_unlock(&sk->sk_peer_lock); +} + static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { @@ -243,6 +258,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode) chan->mode = L2CAP_MODE_LE_FLOWCTL; + l2cap_sock_init_pid(sk); + err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), &la.l2_bdaddr, la.l2_bdaddr_type); if (err) @@ -298,6 +315,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) goto done; } + l2cap_sock_init_pid(sk); + sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 3e5283607b97..f8f74d344297 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -39,6 +39,7 @@ #include "mgmt_config.h" #include "msft.h" #include "eir.h" +#include "aosp.h" #define MGMT_VERSION 1 #define MGMT_REVISION 21 @@ -276,10 +277,39 @@ static const u8 mgmt_status_table[] = { MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */ }; -static u8 mgmt_status(u8 hci_status) +static u8 mgmt_errno_status(int err) { - if (hci_status < ARRAY_SIZE(mgmt_status_table)) - return mgmt_status_table[hci_status]; + switch (err) { + case 0: + return MGMT_STATUS_SUCCESS; + case -EPERM: + return MGMT_STATUS_REJECTED; + case -EINVAL: + return MGMT_STATUS_INVALID_PARAMS; + case -EOPNOTSUPP: + return MGMT_STATUS_NOT_SUPPORTED; + case -EBUSY: + return MGMT_STATUS_BUSY; + case -ETIMEDOUT: + return MGMT_STATUS_AUTH_FAILED; + case -ENOMEM: + return MGMT_STATUS_NO_RESOURCES; + case -EISCONN: + return MGMT_STATUS_ALREADY_CONNECTED; + case -ENOTCONN: + return MGMT_STATUS_DISCONNECTED; + } + + return MGMT_STATUS_FAILED; +} + +static u8 mgmt_status(int err) +{ + if (err < 0) + return mgmt_errno_status(err); + + if (err < ARRAY_SIZE(mgmt_status_table)) + return mgmt_status_table[err]; return MGMT_STATUS_FAILED; } @@ -810,12 +840,7 @@ static u32 get_supported_settings(struct hci_dev *hdev) settings |= MGMT_SETTING_SECURE_CONN; settings |= MGMT_SETTING_PRIVACY; settings |= MGMT_SETTING_STATIC_ADDRESS; - - /* When the experimental feature for LL Privacy support is - * enabled, then advertising is no longer supported. 
- */ - if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - settings |= MGMT_SETTING_ADVERTISING; + settings |= MGMT_SETTING_ADVERTISING; } if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || @@ -903,13 +928,6 @@ static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev) return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev); } -static struct mgmt_pending_cmd *pending_find_data(u16 opcode, - struct hci_dev *hdev, - const void *data) -{ - return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data); -} - u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev) { struct mgmt_pending_cmd *cmd; @@ -951,32 +969,41 @@ bool mgmt_get_connectable(struct hci_dev *hdev) return hci_dev_test_flag(hdev, HCI_CONNECTABLE); } +static int service_cache_sync(struct hci_dev *hdev, void *data) +{ + hci_update_eir_sync(hdev); + hci_update_class_sync(hdev); + + return 0; +} + static void service_cache_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, service_cache.work); - struct hci_request req; if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) return; - hci_req_init(&req, hdev); - - hci_dev_lock(hdev); - - __hci_req_update_eir(&req); - __hci_req_update_class(&req); - - hci_dev_unlock(hdev); + hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL); +} - hci_req_run(&req, NULL); +static int rpa_expired_sync(struct hci_dev *hdev, void *data) +{ + /* The generation of a new RPA and programming it into the + * controller happens in the hci_req_enable_advertising() + * function. + */ + if (ext_adv_capable(hdev)) + return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance); + else + return hci_enable_advertising_sync(hdev); } static void rpa_expired(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, rpa_expired.work); - struct hci_request req; bt_dev_dbg(hdev, ""); @@ -985,16 +1012,7 @@ static void rpa_expired(struct work_struct *work) if (!hci_dev_test_flag(hdev, HCI_ADVERTISING)) return; - /* The generation of a new RPA and programming it into the - * controller happens in the hci_req_enable_advertising() - * function. - */ - hci_req_init(&req, hdev); - if (ext_adv_capable(hdev)) - __hci_req_start_ext_adv(&req, hdev->cur_adv_instance); - else - __hci_req_enable_advertising(&req); - hci_req_run(&req, NULL); + hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL); } static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) @@ -1131,16 +1149,6 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) sizeof(settings)); } -static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode) -{ - bt_dev_dbg(hdev, "status 0x%02x", status); - - if (hci_conn_count(hdev) == 0) { - cancel_delayed_work(&hdev->power_off); - queue_work(hdev->req_workqueue, &hdev->power_off.work); - } -} - void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance) { struct mgmt_ev_advertising_added ev; @@ -1168,38 +1176,77 @@ static void cancel_adv_timeout(struct hci_dev *hdev) } } -static int clean_up_hci_state(struct hci_dev *hdev) +/* This function requires the caller holds hdev->lock */ +static void restart_le_actions(struct hci_dev *hdev) { - struct hci_request req; - struct hci_conn *conn; - bool discov_stopped; - int err; + struct hci_conn_params *p; - hci_req_init(&req, hdev); + list_for_each_entry(p, &hdev->le_conn_params, list) { + /* Needed for AUTO_OFF case where might not "really" + * have been powered off. 
+ */ + list_del_init(&p->action); - if (test_bit(HCI_ISCAN, &hdev->flags) || - test_bit(HCI_PSCAN, &hdev->flags)) { - u8 scan = 0x00; - hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); + switch (p->auto_connect) { + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + list_add(&p->action, &hdev->pend_le_conns); + break; + case HCI_AUTO_CONN_REPORT: + list_add(&p->action, &hdev->pend_le_reports); + break; + default: + break; + } } +} + +static int new_settings(struct hci_dev *hdev, struct sock *skip) +{ + __le32 ev = cpu_to_le32(get_current_settings(hdev)); - hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false); + return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, + sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip); +} - if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - __hci_req_disable_advertising(&req); +static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; - discov_stopped = hci_req_stop_discovery(&req); + bt_dev_dbg(hdev, "err %d", err); - list_for_each_entry(conn, &hdev->conn_hash.list, list) { - /* 0x15 == Terminated due to Power Off */ - __hci_abort_conn(&req, conn, 0x15); + if (!err) { + if (cp->val) { + hci_dev_lock(hdev); + restart_le_actions(hdev); + hci_update_passive_scan(hdev); + hci_dev_unlock(hdev); + } + + send_settings_rsp(cmd->sk, cmd->opcode, hdev); + + /* Only call new_setting for power on as power off is deferred + * to hdev->power_off work which does call hci_dev_do_close. + */ + if (cp->val) + new_settings(hdev, cmd->sk); + } else { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, + mgmt_status(err)); } - err = hci_req_run(&req, clean_up_hci_complete); - if (!err && discov_stopped) - hci_discovery_set_state(hdev, DISCOVERY_STOPPING); + mgmt_pending_free(cmd); +} - return err; +static int set_powered_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + + BT_DBG("%s", hdev->name); + + return hci_set_powered_sync(hdev, cp->val); } static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, @@ -1228,43 +1275,20 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } - if (cp->val) { - queue_work(hdev->req_workqueue, &hdev->power_on); - err = 0; - } else { - /* Disconnect connections, stop scans, etc */ - err = clean_up_hci_state(hdev); - if (!err) - queue_delayed_work(hdev->req_workqueue, &hdev->power_off, - HCI_POWER_OFF_TIMEOUT); - - /* ENODATA means there were no HCI commands queued */ - if (err == -ENODATA) { - cancel_delayed_work(&hdev->power_off); - queue_work(hdev->req_workqueue, &hdev->power_off.work); - err = 0; - } - } + err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd, + mgmt_set_powered_complete); failed: hci_dev_unlock(hdev); return err; } -static int new_settings(struct hci_dev *hdev, struct sock *skip) -{ - __le32 ev = cpu_to_le32(get_current_settings(hdev)); - - return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, - sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip); -} - int mgmt_new_settings(struct hci_dev *hdev) { return new_settings(hdev, NULL); @@ -1346,23 +1370,20 @@ static u8 mgmt_le_support(struct hci_dev *hdev) return MGMT_STATUS_SUCCESS; } -void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status) +static void 
mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data, + int err) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; - bt_dev_dbg(hdev, "status 0x%02x", status); + bt_dev_dbg(hdev, "err %d", err); hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); - if (!cmd) - goto unlock; - - if (status) { - u8 mgmt_err = mgmt_status(status); + if (err) { + u8 mgmt_err = mgmt_status(err); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); - goto remove_cmd; + goto done; } if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && @@ -1374,13 +1395,18 @@ void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status) send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev); new_settings(hdev, cmd->sk); -remove_cmd: - mgmt_pending_remove(cmd); - -unlock: +done: + mgmt_pending_free(cmd); hci_dev_unlock(hdev); } +static int set_discoverable_sync(struct hci_dev *hdev, void *data) +{ + BT_DBG("%s", hdev->name); + + return hci_update_discoverable_sync(hdev); +} + static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -1479,7 +1505,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1503,39 +1529,34 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, else hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); - queue_work(hdev->req_workqueue, &hdev->discoverable_update); - err = 0; + err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd, + mgmt_set_discoverable_complete); failed: hci_dev_unlock(hdev); return err; } -void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status) +static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data, + int err) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; - bt_dev_dbg(hdev, "status 0x%02x", status); + bt_dev_dbg(hdev, "err %d", err); hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev); - if (!cmd) - goto unlock; - - if (status) { - u8 mgmt_err = mgmt_status(status); + if (err) { + u8 mgmt_err = mgmt_status(err); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); - goto remove_cmd; + goto done; } send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); new_settings(hdev, cmd->sk); -remove_cmd: - mgmt_pending_remove(cmd); - -unlock: +done: + mgmt_pending_free(cmd); hci_dev_unlock(hdev); } @@ -1561,13 +1582,20 @@ static int set_connectable_update_settings(struct hci_dev *hdev, if (changed) { hci_req_update_scan(hdev); - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); return new_settings(hdev, sk); } return 0; } +static int set_connectable_sync(struct hci_dev *hdev, void *data) +{ + BT_DBG("%s", hdev->name); + + return hci_update_connectable_sync(hdev); +} + static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -1600,7 +1628,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1617,8 +1645,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_clear_flag(hdev, 
HCI_CONNECTABLE); } - queue_work(hdev->req_workqueue, &hdev->connectable_update); - err = 0; + err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd, + mgmt_set_connectable_complete); failed: hci_dev_unlock(hdev); @@ -1653,12 +1681,7 @@ static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data, /* In limited privacy mode the change of bondable mode * may affect the local advertising address. */ - if (hdev_is_powered(hdev) && - hci_dev_test_flag(hdev, HCI_ADVERTISING) && - hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && - hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) - queue_work(hdev->req_workqueue, - &hdev->discoverable_update); + hci_update_discoverable(hdev); err = new_settings(hdev, sk); } @@ -1737,6 +1760,69 @@ failed: return err; } +static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) +{ + struct cmd_lookup match = { NULL, hdev }; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 enable = cp->val; + bool changed; + + if (err) { + u8 mgmt_err = mgmt_status(err); + + if (enable && hci_dev_test_and_clear_flag(hdev, + HCI_SSP_ENABLED)) { + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); + new_settings(hdev, NULL); + } + + mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, + &mgmt_err); + return; + } + + if (enable) { + changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); + } else { + changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); + + if (!changed) + changed = hci_dev_test_and_clear_flag(hdev, + HCI_HS_ENABLED); + else + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); + } + + mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); + + if (changed) + new_settings(hdev, match.sk); + + if (match.sk) + sock_put(match.sk); + + hci_update_eir_sync(hdev); +} + +static int set_ssp_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + bool changed = false; + int err; + + if (cp->val) + changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); + + err = hci_write_ssp_mode_sync(hdev, cp->val); + + if (!err && changed) + hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); + + return err; +} + static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; @@ -1798,19 +1884,18 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) } cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto failed; - } - - if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) - hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, - sizeof(cp->val), &cp->val); + else + err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd, + set_ssp_complete); - err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val); if (err < 0) { - mgmt_pending_remove(cmd); - goto failed; + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_remove(cmd); } failed: @@ -1879,18 +1964,17 @@ unlock: return err; } -static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static void set_le_complete(struct hci_dev *hdev, void *data, int err) { struct cmd_lookup match = { NULL, hdev }; + u8 status = mgmt_status(err); - hci_dev_lock(hdev); + bt_dev_dbg(hdev, "err %d", err); if (status) { - u8 mgmt_err = mgmt_status(status); - mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, - &mgmt_err); - goto unlock; + &status); + return; } mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match); @@ 
-1899,39 +1983,54 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) if (match.sk) sock_put(match.sk); +} + +static int set_le_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 val = !!cp->val; + int err; + + if (!val) { + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) + hci_disable_advertising_sync(hdev); + + if (ext_adv_capable(hdev)) + hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk); + } else { + hci_dev_set_flag(hdev, HCI_LE_ENABLED); + } + + err = hci_write_le_host_supported_sync(hdev, val, 0); /* Make sure the controller has a good default for * advertising data. Restrict the update to when LE * has actually been enabled. During power on, the * update in powered_update_hci will take care of it. */ - if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { - struct hci_request req; - hci_req_init(&req, hdev); + if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { if (ext_adv_capable(hdev)) { - int err; + int status; - err = __hci_req_setup_ext_adv_instance(&req, 0x00); - if (!err) - __hci_req_update_scan_rsp_data(&req, 0x00); + status = hci_setup_ext_adv_instance_sync(hdev, 0x00); + if (!status) + hci_update_scan_rsp_data_sync(hdev, 0x00); } else { - __hci_req_update_adv_data(&req, 0x00); - __hci_req_update_scan_rsp_data(&req, 0x00); + hci_update_adv_data_sync(hdev, 0x00); + hci_update_scan_rsp_data_sync(hdev, 0x00); } - hci_req_run(&req, NULL); - hci_update_background_scan(hdev); + + hci_update_passive_scan(hdev); } -unlock: - hci_dev_unlock(hdev); + return err; } static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; - struct hci_cp_write_le_host_supported hci_cp; struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; u8 val, enabled; @@ -2001,33 +2100,20 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto unlock; - } - - hci_req_init(&req, hdev); - - memset(&hci_cp, 0, sizeof(hci_cp)); + else + err = hci_cmd_sync_queue(hdev, set_le_sync, cmd, + set_le_complete); - if (val) { - hci_cp.le = val; - hci_cp.simul = 0x00; - } else { - if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - __hci_req_disable_advertising(&req); + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_FAILED); - if (ext_adv_capable(hdev)) - __hci_req_clear_ext_adv_sets(&req); + if (cmd) + mgmt_pending_remove(cmd); } - hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), - &hci_cp); - - err = hci_req_run(&req, le_enable_complete); - if (err < 0) - mgmt_pending_remove(cmd); - unlock: hci_dev_unlock(hdev); return err; @@ -2075,37 +2161,33 @@ static u8 get_uuid_size(const u8 *uuid) return 16; } -static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status) +static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; - hci_dev_lock(hdev); - - cmd = pending_find(mgmt_op, hdev); - if (!cmd) - goto unlock; + bt_dev_dbg(hdev, "err %d", err); mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status), hdev->dev_class, 3); - - mgmt_pending_remove(cmd); + mgmt_status(err), hdev->dev_class, 3); -unlock: - hci_dev_unlock(hdev); + mgmt_pending_free(cmd); } -static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static int add_uuid_sync(struct hci_dev *hdev, void *data) 
{ - bt_dev_dbg(hdev, "status 0x%02x", status); + int err; + + err = hci_update_class_sync(hdev); + if (err) + return err; - mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status); + return hci_update_eir_sync(hdev); } static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_add_uuid *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; struct bt_uuid *uuid; int err; @@ -2131,28 +2213,17 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) list_add_tail(&uuid->list, &hdev->uuids); - hci_req_init(&req, hdev); - - __hci_req_update_class(&req); - __hci_req_update_eir(&req); - - err = hci_req_run(&req, add_uuid_complete); - if (err < 0) { - if (err != -ENODATA) - goto failed; - - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0, - hdev->dev_class, 3); - goto failed; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } - err = 0; + err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete); + if (err < 0) { + mgmt_pending_free(cmd); + goto failed; + } failed: hci_dev_unlock(hdev); @@ -2173,11 +2244,15 @@ static bool enable_service_cache(struct hci_dev *hdev) return false; } -static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static int remove_uuid_sync(struct hci_dev *hdev, void *data) { - bt_dev_dbg(hdev, "status 0x%02x", status); + int err; + + err = hci_update_class_sync(hdev); + if (err) + return err; - mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status); + return hci_update_eir_sync(hdev); } static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, @@ -2187,7 +2262,6 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, struct mgmt_pending_cmd *cmd; struct bt_uuid *match, *tmp; u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - struct hci_request req; int err, found; bt_dev_dbg(hdev, "sock %p", sk); @@ -2231,39 +2305,35 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, } update_class: - hci_req_init(&req, hdev); - - __hci_req_update_class(&req); - __hci_req_update_eir(&req); - - err = hci_req_run(&req, remove_uuid_complete); - if (err < 0) { - if (err != -ENODATA) - goto unlock; - - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, - hdev->dev_class, 3); - goto unlock; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } - err = 0; + err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd, + mgmt_class_complete); + if (err < 0) + mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); return err; } -static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static int set_class_sync(struct hci_dev *hdev, void *data) { - bt_dev_dbg(hdev, "status 0x%02x", status); + int err = 0; + + if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) { + cancel_delayed_work_sync(&hdev->service_cache); + err = hci_update_eir_sync(hdev); + } + + if (err) + return err; - mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status); + return hci_update_class_sync(hdev); } static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, @@ -2271,7 +2341,6 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, { struct mgmt_cp_set_dev_class *cp = data; struct mgmt_pending_cmd *cmd; - 
struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -2303,34 +2372,16 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - hci_req_init(&req, hdev); - - if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) { - hci_dev_unlock(hdev); - cancel_delayed_work_sync(&hdev->service_cache); - hci_dev_lock(hdev); - __hci_req_update_eir(&req); - } - - __hci_req_update_class(&req); - - err = hci_req_run(&req, set_class_complete); - if (err < 0) { - if (err != -ENODATA) - goto unlock; - - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, - hdev->dev_class, 3); - goto unlock; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } - err = 0; + err = hci_cmd_sync_queue(hdev, set_class_sync, cmd, + mgmt_class_complete); + if (err < 0) + mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); @@ -3228,65 +3279,70 @@ static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev, HCI_OP_USER_PASSKEY_NEG_REPLY, 0); } -static void adv_expire(struct hci_dev *hdev, u32 flags) +static int adv_expire_sync(struct hci_dev *hdev, u32 flags) { struct adv_info *adv_instance; - struct hci_request req; - int err; adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); if (!adv_instance) - return; + return 0; /* stop if current instance doesn't need to be changed */ if (!(adv_instance->flags & flags)) - return; + return 0; cancel_adv_timeout(hdev); adv_instance = hci_get_next_instance(hdev, adv_instance->instance); if (!adv_instance) - return; + return 0; - hci_req_init(&req, hdev); - err = __hci_req_schedule_adv_instance(&req, adv_instance->instance, - true); - if (err) - return; + hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true); - hci_req_run(&req, NULL); + return 0; } -static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static int name_changed_sync(struct hci_dev *hdev, void *data) { - struct mgmt_cp_set_local_name *cp; - struct mgmt_pending_cmd *cmd; - - bt_dev_dbg(hdev, "status 0x%02x", status); - - hci_dev_lock(hdev); + return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME); +} - cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); - if (!cmd) - goto unlock; +static void set_name_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_set_local_name *cp = cmd->param; + u8 status = mgmt_status(err); - cp = cmd->param; + bt_dev_dbg(hdev, "err %d", err); if (status) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, - mgmt_status(status)); + status); } else { mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, cp, sizeof(*cp)); if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME); + hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL); } mgmt_pending_remove(cmd); +} -unlock: - hci_dev_unlock(hdev); +static int set_name_sync(struct hci_dev *hdev, void *data) +{ + if (lmp_bredr_capable(hdev)) { + hci_update_name_sync(hdev); + hci_update_eir_sync(hdev); + } + + /* The name is stored in the scan response data and so + * no need to update the advertising data here. 
+ */ + if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING)) + hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance); + + return 0; } static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, @@ -3294,7 +3350,6 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, { struct mgmt_cp_set_local_name *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -3330,35 +3385,34 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto failed; - } + else + err = hci_cmd_sync_queue(hdev, set_name_sync, cmd, + set_name_complete); - memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, + MGMT_STATUS_FAILED); - hci_req_init(&req, hdev); + if (cmd) + mgmt_pending_remove(cmd); - if (lmp_bredr_capable(hdev)) { - __hci_req_update_name(&req); - __hci_req_update_eir(&req); + goto failed; } - /* The name is stored in the scan response data and so - * no need to update the advertising data here. - */ - if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING)) - __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance); - - err = hci_req_run(&req, set_name_complete); - if (err < 0) - mgmt_pending_remove(cmd); + memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); failed: hci_dev_unlock(hdev); return err; } +static int appearance_changed_sync(struct hci_dev *hdev, void *data) +{ + return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE); +} + static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -3380,7 +3434,8 @@ static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data, hdev->appearance = appearance; if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE); + hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL, + NULL); ext_info_changed(hdev, sk); } @@ -3426,23 +3481,26 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip) sizeof(ev), skip); } -static void set_default_phy_complete(struct hci_dev *hdev, u8 status, - u16 opcode, struct sk_buff *skb) +static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err) { - struct mgmt_pending_cmd *cmd; - - bt_dev_dbg(hdev, "status 0x%02x", status); + struct mgmt_pending_cmd *cmd = data; + struct sk_buff *skb = cmd->skb; + u8 status = mgmt_status(err); - hci_dev_lock(hdev); + if (!status) { + if (!skb) + status = MGMT_STATUS_FAILED; + else if (IS_ERR(skb)) + status = mgmt_status(PTR_ERR(skb)); + else + status = mgmt_status(skb->data[0]); + } - cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev); - if (!cmd) - goto unlock; + bt_dev_dbg(hdev, "status %d", status); if (status) { mgmt_cmd_status(cmd->sk, hdev->id, - MGMT_OP_SET_PHY_CONFIGURATION, - mgmt_status(status)); + MGMT_OP_SET_PHY_CONFIGURATION, status); } else { mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, 0, @@ -3451,19 +3509,56 @@ static void set_default_phy_complete(struct hci_dev *hdev, u8 status, mgmt_phy_configuration_changed(hdev, cmd->sk); } + if (skb && !IS_ERR(skb)) + kfree_skb(skb); + mgmt_pending_remove(cmd); +} -unlock: - hci_dev_unlock(hdev); +static int set_default_phy_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_set_phy_configuration *cp 
= cmd->param; + struct hci_cp_le_set_default_phy cp_phy; + u32 selected_phys = __le32_to_cpu(cp->selected_phys); + + memset(&cp_phy, 0, sizeof(cp_phy)); + + if (!(selected_phys & MGMT_PHY_LE_TX_MASK)) + cp_phy.all_phys |= 0x01; + + if (!(selected_phys & MGMT_PHY_LE_RX_MASK)) + cp_phy.all_phys |= 0x02; + + if (selected_phys & MGMT_PHY_LE_1M_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_1M; + + if (selected_phys & MGMT_PHY_LE_2M_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_2M; + + if (selected_phys & MGMT_PHY_LE_CODED_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED; + + if (selected_phys & MGMT_PHY_LE_1M_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_1M; + + if (selected_phys & MGMT_PHY_LE_2M_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_2M; + + if (selected_phys & MGMT_PHY_LE_CODED_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED; + + cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY, + sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT); + + return 0; } static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_phy_configuration *cp = data; - struct hci_cp_le_set_default_phy cp_phy; struct mgmt_pending_cmd *cmd; - struct hci_request req; u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys; u16 pkt_type = (HCI_DH1 | HCI_DM1); bool changed = false; @@ -3567,44 +3662,20 @@ static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto unlock; - } - - hci_req_init(&req, hdev); - - memset(&cp_phy, 0, sizeof(cp_phy)); - - if (!(selected_phys & MGMT_PHY_LE_TX_MASK)) - cp_phy.all_phys |= 0x01; - - if (!(selected_phys & MGMT_PHY_LE_RX_MASK)) - cp_phy.all_phys |= 0x02; - - if (selected_phys & MGMT_PHY_LE_1M_TX) - cp_phy.tx_phys |= HCI_LE_SET_PHY_1M; - - if (selected_phys & MGMT_PHY_LE_2M_TX) - cp_phy.tx_phys |= HCI_LE_SET_PHY_2M; - - if (selected_phys & MGMT_PHY_LE_CODED_TX) - cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED; - - if (selected_phys & MGMT_PHY_LE_1M_RX) - cp_phy.rx_phys |= HCI_LE_SET_PHY_1M; - - if (selected_phys & MGMT_PHY_LE_2M_RX) - cp_phy.rx_phys |= HCI_LE_SET_PHY_2M; - - if (selected_phys & MGMT_PHY_LE_CODED_RX) - cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED; + else + err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd, + set_default_phy_complete); - hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy); + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_FAILED); - err = hci_req_run_skb(&req, set_default_phy_complete); - if (err < 0) - mgmt_pending_remove(cmd); + if (cmd) + mgmt_pending_remove(cmd); + } unlock: hci_dev_unlock(hdev); @@ -3852,7 +3923,7 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, idx++; } - if (hdev && use_ll_privacy(hdev)) { + if (hdev && ll_privacy_capable(hdev)) { if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) flags = BIT(0) | BIT(1); else @@ -3863,7 +3934,8 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, idx++; } - if (hdev && hdev->set_quality_report) { + if (hdev && (aosp_has_quality_report(hdev) || + hdev->set_quality_report)) { if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)) flags = BIT(0); else @@ -3927,7 +3999,9 @@ static int exp_debug_feature_changed(bool enabled, struct sock *skip) } #endif -static int exp_quality_report_feature_changed(bool enabled, struct sock *skip) +static int exp_quality_report_feature_changed(bool enabled, + struct hci_dev *hdev, + struct sock 
*skip) { struct mgmt_ev_exp_feature_changed ev; @@ -3935,7 +4009,7 @@ static int exp_quality_report_feature_changed(bool enabled, struct sock *skip) memcpy(ev.uuid, quality_report_uuid, 16); ev.flags = cpu_to_le32(enabled ? BIT(0) : 0); - return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev, sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS, skip); } @@ -4125,7 +4199,7 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, val = !!cp->param[0]; changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)); - if (!hdev->set_quality_report) { + if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_NOT_SUPPORTED); @@ -4133,13 +4207,18 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, } if (changed) { - err = hdev->set_quality_report(hdev, val); + if (hdev->set_quality_report) + err = hdev->set_quality_report(hdev, val); + else + err = aosp_set_quality_report(hdev, val); + if (err) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_FAILED); goto unlock_quality_report; } + if (val) hci_dev_set_flag(hdev, HCI_QUALITY_REPORT); else @@ -4151,19 +4230,20 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, memcpy(rp.uuid, quality_report_uuid, 16); rp.flags = cpu_to_le32(val ? BIT(0) : 0); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); - err = mgmt_cmd_complete(sk, hdev->id, - MGMT_OP_SET_EXP_FEATURE, 0, + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) - exp_quality_report_feature_changed(val, sk); + exp_quality_report_feature_changed(val, hdev, sk); unlock_quality_report: hci_req_sync_unlock(hdev); return err; } -static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip) +static int exp_offload_codec_feature_changed(bool enabled, struct hci_dev *hdev, + struct sock *skip) { struct mgmt_ev_exp_feature_changed ev; @@ -4171,7 +4251,7 @@ static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip) memcpy(ev.uuid, offload_codecs_uuid, 16); ev.flags = cpu_to_le32(enabled ? 
BIT(0) : 0); - return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev, sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS, skip); } @@ -4229,7 +4309,7 @@ static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev, &rp, sizeof(rp)); if (changed) - exp_offload_codec_feature_changed(val, sk); + exp_offload_codec_feature_changed(val, hdev, sk); return err; } @@ -4496,7 +4576,7 @@ int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) hdev->adv_monitors_cnt++; if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED) monitor->state = ADV_MONITOR_STATE_REGISTERED; - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); } err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, @@ -4722,7 +4802,7 @@ int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status) rp.monitor_handle = cp->monitor_handle; if (!status) - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); @@ -4801,28 +4881,33 @@ unlock: status); } -static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, - u16 opcode, struct sk_buff *skb) +static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_rp_read_local_oob_data mgmt_rp; size_t rp_size = sizeof(mgmt_rp); - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; + struct sk_buff *skb = cmd->skb; + u8 status = mgmt_status(err); - bt_dev_dbg(hdev, "status %u", status); + if (!status) { + if (!skb) + status = MGMT_STATUS_FAILED; + else if (IS_ERR(skb)) + status = mgmt_status(PTR_ERR(skb)); + else + status = mgmt_status(skb->data[0]); + } - cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); - if (!cmd) - return; + bt_dev_dbg(hdev, "status %d", status); - if (status || !skb) { - mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, - status ? 
mgmt_status(status) : MGMT_STATUS_FAILED); + if (status) { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status); goto remove; } memset(&mgmt_rp, 0, sizeof(mgmt_rp)); - if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) { + if (!bredr_sc_enabled(hdev)) { struct hci_rp_read_local_oob_data *rp = (void *) skb->data; if (skb->len < sizeof(*rp)) { @@ -4857,14 +4942,31 @@ static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size); remove: - mgmt_pending_remove(cmd); + if (skb && !IS_ERR(skb)) + kfree_skb(skb); + + mgmt_pending_free(cmd); +} + +static int read_local_oob_data_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + + if (bredr_sc_enabled(hdev)) + cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk); + else + cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk); + + if (IS_ERR(cmd->skb)) + return PTR_ERR(cmd->skb); + else + return 0; } static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -4889,22 +4991,20 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); - if (!cmd) { + cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); + if (!cmd) err = -ENOMEM; - goto unlock; - } - - hci_req_init(&req, hdev); - - if (bredr_sc_enabled(hdev)) - hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL); else - hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); + err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd, + read_local_oob_data_complete); - err = hci_req_run_skb(&req, read_local_oob_data_complete); - if (err < 0) - mgmt_pending_remove(cmd); + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_free(cmd); + } unlock: hci_dev_unlock(hdev); @@ -5077,13 +5177,6 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status) } hci_dev_unlock(hdev); - - /* Handle suspend notifier */ - if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY, - hdev->suspend_tasks)) { - bt_dev_dbg(hdev, "Unpaused discovery"); - wake_up(&hdev->suspend_wait_q); - } } static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, @@ -5113,6 +5206,25 @@ static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, return true; } +static void start_discovery_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + bt_dev_dbg(hdev, "err %d", err); + + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + cmd->param, 1); + mgmt_pending_free(cmd); + + hci_discovery_set_state(hdev, err ? 
DISCOVERY_STOPPED: + DISCOVERY_FINDING); +} + +static int start_discovery_sync(struct hci_dev *hdev, void *data) +{ + return hci_start_discovery_sync(hdev); +} + static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev, u16 op, void *data, u16 len) { @@ -5164,17 +5276,20 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev, else hdev->discovery.limited = false; - cmd = mgmt_pending_add(sk, op, hdev, data, len); + cmd = mgmt_pending_new(sk, op, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } - cmd->cmd_complete = generic_cmd_complete; + err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd, + start_discovery_complete); + if (err < 0) { + mgmt_pending_free(cmd); + goto failed; + } hci_discovery_set_state(hdev, DISCOVERY_STARTING); - queue_work(hdev->req_workqueue, &hdev->discov_update); - err = 0; failed: hci_dev_unlock(hdev); @@ -5196,13 +5311,6 @@ static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev, data, len); } -static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd, - u8 status) -{ - return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, - cmd->param, 1); -} - static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -5271,15 +5379,13 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY, + cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } - cmd->cmd_complete = service_discovery_cmd_complete; - /* Clear the discovery filter first to free any previously * allocated memory for the UUID list. */ @@ -5303,9 +5409,14 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, } } + err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd, + start_discovery_complete); + if (err < 0) { + mgmt_pending_free(cmd); + goto failed; + } + hci_discovery_set_state(hdev, DISCOVERY_STARTING); - queue_work(hdev->req_workqueue, &hdev->discov_update); - err = 0; failed: hci_dev_unlock(hdev); @@ -5327,12 +5438,25 @@ void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status) } hci_dev_unlock(hdev); +} - /* Handle suspend notifier */ - if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) { - bt_dev_dbg(hdev, "Paused discovery"); - wake_up(&hdev->suspend_wait_q); - } +static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + bt_dev_dbg(hdev, "err %d", err); + + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + cmd->param, 1); + mgmt_pending_free(cmd); + + if (!err) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); +} + +static int stop_discovery_sync(struct hci_dev *hdev, void *data) +{ + return hci_stop_discovery_sync(hdev); } static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, @@ -5360,17 +5484,20 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } - cmd->cmd_complete = generic_cmd_complete; + err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd, + stop_discovery_complete); + if (err < 0) { + mgmt_pending_free(cmd); + goto unlock; + } hci_discovery_set_state(hdev, DISCOVERY_STOPPING); - queue_work(hdev->req_workqueue, 
&hdev->discov_update); - err = 0; unlock: hci_dev_unlock(hdev); @@ -5491,11 +5618,15 @@ done: return err; } +static int set_device_id_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_eir_sync(hdev); +} + static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_device_id *cp = data; - struct hci_request req; int err; __u16 source; @@ -5517,38 +5648,32 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0); - hci_req_init(&req, hdev); - __hci_req_update_eir(&req); - hci_req_run(&req, NULL); + hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL); hci_dev_unlock(hdev); return err; } -static void enable_advertising_instance(struct hci_dev *hdev, u8 status, - u16 opcode) +static void enable_advertising_instance(struct hci_dev *hdev, int err) { - bt_dev_dbg(hdev, "status %u", status); + if (err) + bt_dev_err(hdev, "failed to re-configure advertising %d", err); + else + bt_dev_dbg(hdev, "status %d", err); } -static void set_advertising_complete(struct hci_dev *hdev, u8 status, - u16 opcode) +static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) { struct cmd_lookup match = { NULL, hdev }; - struct hci_request req; u8 instance; struct adv_info *adv_instance; - int err; - - hci_dev_lock(hdev); + u8 status = mgmt_status(err); if (status) { - u8 mgmt_err = mgmt_status(status); - mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, - cmd_status_rsp, &mgmt_err); - goto unlock; + cmd_status_rsp, &status); + return; } if (hci_dev_test_flag(hdev, HCI_LE_ADV)) @@ -5564,46 +5689,60 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status, if (match.sk) sock_put(match.sk); - /* Handle suspend notifier */ - if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING, - hdev->suspend_tasks)) { - bt_dev_dbg(hdev, "Paused advertising"); - wake_up(&hdev->suspend_wait_q); - } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING, - hdev->suspend_tasks)) { - bt_dev_dbg(hdev, "Unpaused advertising"); - wake_up(&hdev->suspend_wait_q); - } - /* If "Set Advertising" was just disabled and instance advertising was * set up earlier, then re-enable multi-instance advertising. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) - goto unlock; + return; instance = hdev->cur_adv_instance; if (!instance) { adv_instance = list_first_entry_or_null(&hdev->adv_instances, struct adv_info, list); if (!adv_instance) - goto unlock; + return; instance = adv_instance->instance; } - hci_req_init(&req, hdev); + err = hci_schedule_adv_instance_sync(hdev, instance, true); - err = __hci_req_schedule_adv_instance(&req, instance, true); + enable_advertising_instance(hdev, err); +} - if (!err) - err = hci_req_run(&req, enable_advertising_instance); +static int set_adv_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 val = !!cp->val; - if (err) - bt_dev_err(hdev, "failed to re-configure advertising"); + if (cp->val == 0x02) + hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + else + hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); -unlock: - hci_dev_unlock(hdev); + cancel_adv_timeout(hdev); + + if (val) { + /* Switch to instance "0" for the Set Advertising setting. + * We cannot use update_[adv|scan_rsp]_data() here as the + * HCI_ADVERTISING flag is not yet set. 
+ */ + hdev->cur_adv_instance = 0x00; + + if (ext_adv_capable(hdev)) { + hci_start_ext_adv_sync(hdev, 0x00); + } else { + hci_update_adv_data_sync(hdev, 0x00); + hci_update_scan_rsp_data_sync(hdev, 0x00); + hci_enable_advertising_sync(hdev); + } + } else { + hci_disable_advertising_sync(hdev); + } + + return 0; } static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, @@ -5611,7 +5750,6 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; u8 val, status; int err; @@ -5622,13 +5760,6 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, status); - /* Enabling the experimental LL Privay support disables support for - * advertising. - */ - if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, - MGMT_STATUS_NOT_SUPPORTED); - if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); @@ -5684,40 +5815,13 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, } cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto unlock; - } - - hci_req_init(&req, hdev); - - if (cp->val == 0x02) - hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); else - hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); - - cancel_adv_timeout(hdev); - - if (val) { - /* Switch to instance "0" for the Set Advertising setting. - * We cannot use update_[adv|scan_rsp]_data() here as the - * HCI_ADVERTISING flag is not yet set. - */ - hdev->cur_adv_instance = 0x00; + err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd, + set_advertising_complete); - if (ext_adv_capable(hdev)) { - __hci_req_start_ext_adv(&req, 0x00); - } else { - __hci_req_update_adv_data(&req, 0x00); - __hci_req_update_scan_rsp_data(&req, 0x00); - __hci_req_enable_advertising(&req); - } - } else { - __hci_req_disable_advertising(&req); - } - - err = hci_req_run(&req, set_advertising_complete); - if (err < 0) + if (err < 0 && cmd) mgmt_pending_remove(cmd); unlock: @@ -5810,38 +5914,23 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev, * loaded. 
*/ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && - hdev->discovery.state == DISCOVERY_STOPPED) { - struct hci_request req; - - hci_req_init(&req, hdev); - - hci_req_add_le_scan_disable(&req, false); - hci_req_add_le_passive_scan(&req); - - hci_req_run(&req, NULL); - } + hdev->discovery.state == DISCOVERY_STOPPED) + hci_update_passive_scan(hdev); hci_dev_unlock(hdev); return err; } -static void fast_connectable_complete(struct hci_dev *hdev, u8 status, - u16 opcode) +static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err) { - struct mgmt_pending_cmd *cmd; - - bt_dev_dbg(hdev, "status 0x%02x", status); + struct mgmt_pending_cmd *cmd = data; - hci_dev_lock(hdev); - - cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev); - if (!cmd) - goto unlock; + bt_dev_dbg(hdev, "err %d", err); - if (status) { + if (err) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - mgmt_status(status)); + mgmt_status(err)); } else { struct mgmt_mode *cp = cmd->param; @@ -5854,10 +5943,15 @@ static void fast_connectable_complete(struct hci_dev *hdev, u8 status, new_settings(hdev, cmd->sk); } - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); +} -unlock: - hci_dev_unlock(hdev); +static int write_fast_connectable_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + + return hci_write_fast_connectable_sync(hdev, cp->val); } static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, @@ -5865,58 +5959,49 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) || hdev->hci_ver < BLUETOOTH_VER_1_2) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); - if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_BUSY); - goto unlock; - } - if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) { - err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, - hdev); + err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); goto unlock; } if (!hdev_is_powered(hdev)) { hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE); - err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, - hdev); + err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); new_settings(hdev, sk); goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, - data, len); - if (!cmd) { + cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data, + len); + if (!cmd) err = -ENOMEM; - goto unlock; - } - - hci_req_init(&req, hdev); - - __hci_req_write_fast_connectable(&req, cp->val); + else + err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd, + fast_connectable_complete); - err = hci_req_run(&req, fast_connectable_complete); if (err < 0) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_FAILED); - mgmt_pending_remove(cmd); + mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + MGMT_STATUS_FAILED); + + if (cmd) + 
mgmt_pending_free(cmd); } unlock: @@ -5925,20 +6010,14 @@ unlock: return err; } -static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static void set_bredr_complete(struct hci_dev *hdev, void *data, int err) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; - bt_dev_dbg(hdev, "status 0x%02x", status); - - hci_dev_lock(hdev); - - cmd = pending_find(MGMT_OP_SET_BREDR, hdev); - if (!cmd) - goto unlock; + bt_dev_dbg(hdev, "err %d", err); - if (status) { - u8 mgmt_err = mgmt_status(status); + if (err) { + u8 mgmt_err = mgmt_status(err); /* We need to restore the flag if related HCI commands * failed. @@ -5951,17 +6030,31 @@ static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode) new_settings(hdev, cmd->sk); } - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); +} -unlock: - hci_dev_unlock(hdev); +static int set_bredr_sync(struct hci_dev *hdev, void *data) +{ + int status; + + status = hci_write_fast_connectable_sync(hdev, false); + + if (!status) + status = hci_update_scan_sync(hdev); + + /* Since only the advertising data flags will change, there + * is no need to update the scan response data. + */ + if (!status) + status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); + + return status; } static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -6033,15 +6126,19 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) } } - if (pending_find(MGMT_OP_SET_BREDR, hdev)) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, - MGMT_STATUS_BUSY); - goto unlock; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len); - if (!cmd) { + cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len); + if (!cmd) err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd, + set_bredr_complete); + + if (err < 0) { + mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_FAILED); + if (cmd) + mgmt_pending_free(cmd); + goto unlock; } @@ -6050,42 +6147,23 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) */ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); - hci_req_init(&req, hdev); - - __hci_req_write_fast_connectable(&req, false); - __hci_req_update_scan(&req); - - /* Since only the advertising data flags will change, there - * is no need to update the scan response data. 
- */ - __hci_req_update_adv_data(&req, hdev->cur_adv_instance); - - err = hci_req_run(&req, set_bredr_complete); - if (err < 0) - mgmt_pending_remove(cmd); - unlock: hci_dev_unlock(hdev); return err; } -static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp; - bt_dev_dbg(hdev, "status %u", status); - - hci_dev_lock(hdev); + bt_dev_dbg(hdev, "err %d", err); - cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev); - if (!cmd) - goto unlock; + if (err) { + u8 mgmt_err = mgmt_status(err); - if (status) { - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status)); - goto remove; + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + goto done; } cp = cmd->param; @@ -6105,13 +6183,23 @@ static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) break; } - send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev); + send_settings_rsp(cmd->sk, cmd->opcode, hdev); new_settings(hdev, cmd->sk); -remove: - mgmt_pending_remove(cmd); -unlock: - hci_dev_unlock(hdev); +done: + mgmt_pending_free(cmd); +} + +static int set_secure_conn_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 val = !!cp->val; + + /* Force write of val */ + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + + return hci_write_sc_support_sync(hdev, val); } static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, @@ -6119,7 +6207,6 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; u8 val; int err; @@ -6138,7 +6225,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, - MGMT_STATUS_INVALID_PARAMS); + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); @@ -6169,12 +6256,6 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, goto failed; } - if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, - MGMT_STATUS_BUSY); - goto failed; - } - val = !!cp->val; if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) && @@ -6183,18 +6264,18 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len); - if (!cmd) { + cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len); + if (!cmd) err = -ENOMEM; - goto failed; - } + else + err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd, + set_secure_conn_complete); - hci_req_init(&req, hdev); - hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val); - err = hci_req_run(&req, sc_enable_complete); if (err < 0) { - mgmt_pending_remove(cmd); - goto failed; + mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, + MGMT_STATUS_FAILED); + if (cmd) + mgmt_pending_free(cmd); } failed: @@ -6508,14 +6589,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, return err; } -static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) +static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err) { + struct mgmt_pending_cmd *cmd = data; struct hci_conn *conn = cmd->user_data; + struct mgmt_cp_get_conn_info *cp = cmd->param; struct mgmt_rp_get_conn_info 
rp; - int err; + u8 status; - memcpy(&rp.addr, cmd->param, sizeof(rp.addr)); + bt_dev_dbg(hdev, "err %d", err); + + memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr)); + status = mgmt_status(err); if (status == MGMT_STATUS_SUCCESS) { rp.rssi = conn->rssi; rp.tx_power = conn->tx_power; @@ -6526,67 +6612,58 @@ static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) rp.max_tx_power = HCI_TX_POWER_INVALID; } - err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, - status, &rp, sizeof(rp)); + mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, + &rp, sizeof(rp)); - hci_conn_drop(conn); - hci_conn_put(conn); + if (conn) { + hci_conn_drop(conn); + hci_conn_put(conn); + } - return err; + mgmt_pending_free(cmd); } -static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status, - u16 opcode) +static int get_conn_info_sync(struct hci_dev *hdev, void *data) { - struct hci_cp_read_rssi *cp; - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_get_conn_info *cp = cmd->param; struct hci_conn *conn; - u16 handle; - u8 status; - - bt_dev_dbg(hdev, "status 0x%02x", hci_status); + int err; + __le16 handle; - hci_dev_lock(hdev); + /* Make sure we are still connected */ + if (cp->addr.type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); - /* Commands sent in request are either Read RSSI or Read Transmit Power - * Level so we check which one was last sent to retrieve connection - * handle. Both commands have handle as first parameter so it's safe to - * cast data on the same command struct. - * - * First command sent is always Read RSSI and we fail only if it fails. - * In other case we simply override error to indicate success as we - * already remembered if TX power value is actually valid. - */ - cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI); - if (!cp) { - cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); - status = MGMT_STATUS_SUCCESS; - } else { - status = mgmt_status(hci_status); + if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) { + if (cmd->user_data) { + hci_conn_drop(cmd->user_data); + hci_conn_put(cmd->user_data); + cmd->user_data = NULL; + } + return MGMT_STATUS_NOT_CONNECTED; } - if (!cp) { - bt_dev_err(hdev, "invalid sent_cmd in conn_info response"); - goto unlock; - } + handle = cpu_to_le16(conn->handle); - handle = __le16_to_cpu(cp->handle); - conn = hci_conn_hash_lookup_handle(hdev, handle); - if (!conn) { - bt_dev_err(hdev, "unknown handle (%u) in conn_info response", - handle); - goto unlock; - } + /* Refresh RSSI each time */ + err = hci_read_rssi_sync(hdev, handle); - cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn); - if (!cmd) - goto unlock; + /* For LE links TX power does not change thus we don't need to + * query for it once value is known. 
+ */ + if (!err && (!bdaddr_type_is_le(cp->addr.type) || + conn->tx_power == HCI_TX_POWER_INVALID)) + err = hci_read_tx_power_sync(hdev, handle, 0x00); - cmd->cmd_complete(cmd, status); - mgmt_pending_remove(cmd); + /* Max TX power needs to be read only once per connection */ + if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID) + err = hci_read_tx_power_sync(hdev, handle, 0x01); -unlock: - hci_dev_unlock(hdev); + return err; } static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, @@ -6631,12 +6708,6 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) { - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, - MGMT_STATUS_BUSY, &rp, sizeof(rp)); - goto unlock; - } - /* To avoid client trying to guess when to poll again for information we * calculate conn info age as random value between min/max set in hdev. */ @@ -6650,49 +6721,28 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, if (time_after(jiffies, conn->conn_info_timestamp + msecs_to_jiffies(conn_info_age)) || !conn->conn_info_timestamp) { - struct hci_request req; - struct hci_cp_read_tx_power req_txp_cp; - struct hci_cp_read_rssi req_rssi_cp; struct mgmt_pending_cmd *cmd; - hci_req_init(&req, hdev); - req_rssi_cp.handle = cpu_to_le16(conn->handle); - hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp), - &req_rssi_cp); - - /* For LE links TX power does not change thus we don't need to - * query for it once value is known. - */ - if (!bdaddr_type_is_le(cp->addr.type) || - conn->tx_power == HCI_TX_POWER_INVALID) { - req_txp_cp.handle = cpu_to_le16(conn->handle); - req_txp_cp.type = 0x00; - hci_req_add(&req, HCI_OP_READ_TX_POWER, - sizeof(req_txp_cp), &req_txp_cp); - } + cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data, + len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, get_conn_info_sync, + cmd, get_conn_info_complete); - /* Max TX power needs to be read only once per connection */ - if (conn->max_tx_power == HCI_TX_POWER_INVALID) { - req_txp_cp.handle = cpu_to_le16(conn->handle); - req_txp_cp.type = 0x01; - hci_req_add(&req, HCI_OP_READ_TX_POWER, - sizeof(req_txp_cp), &req_txp_cp); - } + if (err < 0) { + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_FAILED, &rp, sizeof(rp)); - err = hci_req_run(&req, conn_info_refresh_complete); - if (err < 0) - goto unlock; + if (cmd) + mgmt_pending_free(cmd); - cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev, - data, len); - if (!cmd) { - err = -ENOMEM; goto unlock; } hci_conn_hold(conn); cmd->user_data = hci_conn_get(conn); - cmd->cmd_complete = conn_info_cmd_complete; conn->conn_info_timestamp = jiffies; } else { @@ -6710,82 +6760,76 @@ unlock: return err; } -static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) +static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err) { - struct hci_conn *conn = cmd->user_data; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_get_clock_info *cp = cmd->param; struct mgmt_rp_get_clock_info rp; - struct hci_dev *hdev; - int err; + struct hci_conn *conn = cmd->user_data; + u8 status = mgmt_status(err); + + bt_dev_dbg(hdev, "err %d", err); memset(&rp, 0, sizeof(rp)); - memcpy(&rp.addr, cmd->param, sizeof(rp.addr)); + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; - if (status) + if (err) goto complete; - hdev = hci_dev_get(cmd->index); - if (hdev) { - 
rp.local_clock = cpu_to_le32(hdev->clock); - hci_dev_put(hdev); - } + rp.local_clock = cpu_to_le32(hdev->clock); if (conn) { rp.piconet_clock = cpu_to_le32(conn->clock); rp.accuracy = cpu_to_le16(conn->clock_accuracy); - } - -complete: - err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, - sizeof(rp)); - - if (conn) { hci_conn_drop(conn); hci_conn_put(conn); } - return err; +complete: + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, + sizeof(rp)); + + mgmt_pending_free(cmd); } -static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static int get_clock_info_sync(struct hci_dev *hdev, void *data) { - struct hci_cp_read_clock *hci_cp; - struct mgmt_pending_cmd *cmd; - struct hci_conn *conn; - - bt_dev_dbg(hdev, "status %u", status); + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_get_clock_info *cp = cmd->param; + struct hci_cp_read_clock hci_cp; + struct hci_conn *conn = cmd->user_data; + int err; - hci_dev_lock(hdev); + memset(&hci_cp, 0, sizeof(hci_cp)); + err = hci_read_clock_sync(hdev, &hci_cp); - hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); - if (!hci_cp) - goto unlock; + if (conn) { + /* Make sure connection still exists */ + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); - if (hci_cp->which) { - u16 handle = __le16_to_cpu(hci_cp->handle); - conn = hci_conn_hash_lookup_handle(hdev, handle); - } else { - conn = NULL; + if (conn && conn == cmd->user_data && + conn->state == BT_CONNECTED) { + hci_cp.handle = cpu_to_le16(conn->handle); + hci_cp.which = 0x01; /* Piconet clock */ + err = hci_read_clock_sync(hdev, &hci_cp); + } else if (cmd->user_data) { + hci_conn_drop(cmd->user_data); + hci_conn_put(cmd->user_data); + cmd->user_data = NULL; + } } - cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn); - if (!cmd) - goto unlock; - - cmd->cmd_complete(cmd, mgmt_status(status)); - mgmt_pending_remove(cmd); - -unlock: - hci_dev_unlock(hdev); + return err; } static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, - u16 len) + u16 len) { struct mgmt_cp_get_clock_info *cp = data; struct mgmt_rp_get_clock_info rp; - struct hci_cp_read_clock hci_cp; struct mgmt_pending_cmd *cmd; - struct hci_request req; struct hci_conn *conn; int err; @@ -6823,31 +6867,25 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, conn = NULL; } - cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len); - if (!cmd) { + cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len); + if (!cmd) err = -ENOMEM; - goto unlock; - } - - cmd->cmd_complete = clock_info_cmd_complete; + else + err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd, + get_clock_info_complete); - hci_req_init(&req, hdev); + if (err < 0) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, + MGMT_STATUS_FAILED, &rp, sizeof(rp)); - memset(&hci_cp, 0, sizeof(hci_cp)); - hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp); + if (cmd) + mgmt_pending_free(cmd); - if (conn) { + } else if (conn) { hci_conn_hold(conn); cmd->user_data = hci_conn_get(conn); - - hci_cp.handle = cpu_to_le16(conn->handle); - hci_cp.which = 0x01; /* Piconet clock */ - hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp); } - err = hci_req_run(&req, get_clock_info_complete); - if (err < 0) - mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); @@ -6928,6 +6966,11 @@ static void device_added(struct sock *sk, struct hci_dev *hdev, mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, 
sizeof(ev), sk); } +static int add_device_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_passive_scan_sync(hdev); +} + static int add_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -7010,7 +7053,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, current_flags = params->current_flags; } - hci_update_background_scan(hdev); + err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL); + if (err < 0) + goto unlock; added: device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); @@ -7037,6 +7082,11 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev, mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk); } +static int remove_device_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_passive_scan_sync(hdev); +} + static int remove_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -7116,7 +7166,6 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, list_del(¶ms->action); list_del(¶ms->list); kfree(params); - hci_update_background_scan(hdev); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); } else { @@ -7153,10 +7202,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, } bt_dev_dbg(hdev, "All LE connection parameters were removed"); - - hci_update_background_scan(hdev); } + hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL); + complete: err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_SUCCESS, &cp->addr, @@ -7359,21 +7408,27 @@ unlock: return err; } -static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status, - u16 opcode, struct sk_buff *skb) +static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data, + int err) { const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp; struct mgmt_rp_read_local_oob_ext_data *mgmt_rp; u8 *h192, *r192, *h256, *r256; - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; + struct sk_buff *skb = cmd->skb; + u8 status = mgmt_status(err); u16 eir_len; - int err; - bt_dev_dbg(hdev, "status %u", status); + if (!status) { + if (!skb) + status = MGMT_STATUS_FAILED; + else if (IS_ERR(skb)) + status = mgmt_status(PTR_ERR(skb)); + else + status = mgmt_status(skb->data[0]); + } - cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev); - if (!cmd) - return; + bt_dev_dbg(hdev, "status %u", status); mgmt_cp = cmd->param; @@ -7385,7 +7440,7 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status, r192 = NULL; h256 = NULL; r256 = NULL; - } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) { + } else if (!bredr_sc_enabled(hdev)) { struct hci_rp_read_local_oob_data *rp; if (skb->len != sizeof(*rp)) { @@ -7466,6 +7521,9 @@ send_rsp: mgmt_rp, sizeof(*mgmt_rp) + eir_len, HCI_MGMT_OOB_DATA_EVENTS, cmd->sk); done: + if (skb && !IS_ERR(skb)) + kfree_skb(skb); + kfree(mgmt_rp); mgmt_pending_remove(cmd); } @@ -7474,7 +7532,6 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, struct mgmt_cp_read_local_oob_ext_data *cp) { struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev, @@ -7482,14 +7539,9 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, if (!cmd) return -ENOMEM; - hci_req_init(&req, hdev); - - if (bredr_sc_enabled(hdev)) - hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL); - else - hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); + err = hci_cmd_sync_queue(hdev, 
read_local_oob_data_sync, cmd, + read_local_oob_ext_data_complete); - err = hci_req_run_skb(&req, read_local_oob_ext_data_complete); if (err < 0) { mgmt_pending_remove(cmd); return err; @@ -7713,13 +7765,6 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, MGMT_STATUS_REJECTED); - /* Enabling the experimental LL Privay support disables support for - * advertising. - */ - if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, - MGMT_STATUS_NOT_SUPPORTED); - hci_dev_lock(hdev); rp_len = sizeof(*rp) + hdev->adv_instance_cnt; @@ -7876,58 +7921,66 @@ static bool adv_busy(struct hci_dev *hdev) pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev)); } -static void add_advertising_complete(struct hci_dev *hdev, u8 status, - u16 opcode) +static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance, + int err) { - struct mgmt_pending_cmd *cmd; - struct mgmt_cp_add_advertising *cp; - struct mgmt_rp_add_advertising rp; - struct adv_info *adv_instance, *n; - u8 instance; + struct adv_info *adv, *n; - bt_dev_dbg(hdev, "status %u", status); + bt_dev_dbg(hdev, "err %d", err); hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev); - if (!cmd) - cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev); + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + u8 instance; - list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { - if (!adv_instance->pending) + if (!adv->pending) continue; - if (!status) { - adv_instance->pending = false; + if (!err) { + adv->pending = false; continue; } - instance = adv_instance->instance; + instance = adv->instance; if (hdev->cur_adv_instance == instance) cancel_adv_timeout(hdev); hci_remove_adv_instance(hdev, instance); - mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance); + mgmt_advertising_removed(sk, hdev, instance); } - if (!cmd) - goto unlock; + hci_dev_unlock(hdev); +} + +static void add_advertising_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_advertising *cp = cmd->param; + struct mgmt_rp_add_advertising rp; + + memset(&rp, 0, sizeof(rp)); - cp = cmd->param; rp.instance = cp->instance; - if (status) + if (err) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status)); + mgmt_status(err)); else mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status), &rp, sizeof(rp)); + mgmt_status(err), &rp, sizeof(rp)); - mgmt_pending_remove(cmd); + add_adv_complete(hdev, cmd->sk, cp->instance, err); -unlock: - hci_dev_unlock(hdev); + mgmt_pending_free(cmd); +} + +static int add_advertising_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_advertising *cp = cmd->param; + + return hci_schedule_adv_instance_sync(hdev, cp->instance, true); } static int add_advertising(struct sock *sk, struct hci_dev *hdev, @@ -7943,7 +7996,6 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, struct adv_info *next_instance; int err; struct mgmt_pending_cmd *cmd; - struct hci_request req; bt_dev_dbg(hdev, "sock %p", sk); @@ -7952,13 +8004,6 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, status); - /* Enabling the experimental LL Privay support disables support for - * advertising. 
- */ - if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, - MGMT_STATUS_NOT_SUPPORTED); - if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); @@ -8051,25 +8096,19 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, /* We're good to go, update advertising data, parameters, and start * advertising. */ - cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data, + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto unlock; } - hci_req_init(&req, hdev); - - err = __hci_req_schedule_adv_instance(&req, schedule_instance, true); - - if (!err) - err = hci_req_run(&req, add_advertising_complete); + cp->instance = schedule_instance; - if (err < 0) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, - MGMT_STATUS_FAILED); - mgmt_pending_remove(cmd); - } + err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd, + add_advertising_complete); + if (err < 0) + mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); @@ -8077,30 +8116,25 @@ unlock: return err; } -static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status, - u16 opcode) +static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data, + int err) { - struct mgmt_pending_cmd *cmd; - struct mgmt_cp_add_ext_adv_params *cp; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_params *cp = cmd->param; struct mgmt_rp_add_ext_adv_params rp; - struct adv_info *adv_instance; + struct adv_info *adv; u32 flags; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev); - if (!cmd) - goto unlock; - - cp = cmd->param; - adv_instance = hci_find_adv_instance(hdev, cp->instance); - if (!adv_instance) + adv = hci_find_adv_instance(hdev, cp->instance); + if (!adv) goto unlock; rp.instance = cp->instance; - rp.tx_power = adv_instance->tx_power; + rp.tx_power = adv->tx_power; /* While we're at it, inform userspace of the available space for this * advertisement, given the flags that will be used. 
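For context, the mgmt handlers in the hunks above and below (get_conn_info, get_clock_info, add_advertising, remove_advertising) are all being moved off the hci_request machinery onto hci_cmd_sync_queue(). A minimal sketch of that pattern follows, assuming only the APIs visible elsewhere in this diff; the handler names and the choice of opcode are illustrative, not part of the patch:

static int example_op_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Runs from the hci_sync work queue; may issue blocking HCI
	 * commands here instead of building an hci_request.
	 */
	return 0;
}

static void example_op_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Translate the synchronous result into a mgmt response and free
	 * the un-listed pending command.
	 */
	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err));
	mgmt_pending_free(cmd);
}

static int example_op(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	/* mgmt_pending_new() allocates the command without adding it to
	 * hdev->mgmt_pending, so the completion path no longer needs a
	 * pending_find() lookup.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO /* example opcode */,
			       hdev, data, len);
	if (!cmd)
		return -ENOMEM;

	err = hci_cmd_sync_queue(hdev, example_op_sync, cmd,
				 example_op_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

	return err;
}

On success the completion callback owns and frees the command; only a queueing failure is cleaned up by the handler itself, which is the same split visible in the add_advertising and remove_advertising hunks of this file.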
@@ -8109,39 +8143,44 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status, rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); - if (status) { + if (err) { /* If this advertisement was previously advertising and we * failed to update it, we signal that it has been removed and * delete its structure */ - if (!adv_instance->pending) + if (!adv->pending) mgmt_advertising_removed(cmd->sk, hdev, cp->instance); hci_remove_adv_instance(hdev, cp->instance); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status)); - + mgmt_status(err)); } else { mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status), &rp, sizeof(rp)); + mgmt_status(err), &rp, sizeof(rp)); } unlock: if (cmd) - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); hci_dev_unlock(hdev); } +static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_params *cp = cmd->param; + + return hci_setup_ext_adv_instance_sync(hdev, cp->instance); +} + static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_add_ext_adv_params *cp = data; struct mgmt_rp_add_ext_adv_params rp; struct mgmt_pending_cmd *cmd = NULL; - struct adv_info *adv_instance; - struct hci_request req; u32 flags, min_interval, max_interval; u16 timeout, duration; u8 status; @@ -8223,29 +8262,18 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, /* Submit request for advertising params if ext adv available */ if (ext_adv_capable(hdev)) { - hci_req_init(&req, hdev); - adv_instance = hci_find_adv_instance(hdev, cp->instance); - - /* Updating parameters of an active instance will return a - * Command Disallowed error, so we must first disable the - * instance if it is active. 
- */ - if (!adv_instance->pending) - __hci_req_disable_ext_adv_instance(&req, cp->instance); - - __hci_req_setup_ext_adv_instance(&req, cp->instance); - - err = hci_req_run(&req, add_ext_adv_params_complete); - - if (!err) - cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, - hdev, data, data_len); + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev, + data, data_len); if (!cmd) { err = -ENOMEM; hci_remove_adv_instance(hdev, cp->instance); goto unlock; } + err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd, + add_ext_adv_params_complete); + if (err < 0) + mgmt_pending_free(cmd); } else { rp.instance = cp->instance; rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; @@ -8262,6 +8290,49 @@ unlock: return err; } +static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_data *cp = cmd->param; + struct mgmt_rp_add_advertising rp; + + add_adv_complete(hdev, cmd->sk, cp->instance, err); + + memset(&rp, 0, sizeof(rp)); + + rp.instance = cp->instance; + + if (err) + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err)); + else + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err), &rp, sizeof(rp)); + + mgmt_pending_free(cmd); +} + +static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_data *cp = cmd->param; + int err; + + if (ext_adv_capable(hdev)) { + err = hci_update_adv_data_sync(hdev, cp->instance); + if (err) + return err; + + err = hci_update_scan_rsp_data_sync(hdev, cp->instance); + if (err) + return err; + + return hci_enable_ext_advertising_sync(hdev, cp->instance); + } + + return hci_schedule_adv_instance_sync(hdev, cp->instance, true); +} + static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { @@ -8272,7 +8343,6 @@ static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data, struct adv_info *adv_instance; int err = 0; struct mgmt_pending_cmd *cmd; - struct hci_request req; BT_DBG("%s", hdev->name); @@ -8314,78 +8384,52 @@ static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data, cp->data, cp->scan_rsp_len, cp->data + cp->adv_data_len); - /* We're good to go, update advertising data, parameters, and start - * advertising. - */ - - hci_req_init(&req, hdev); - - hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL); - - if (ext_adv_capable(hdev)) { - __hci_req_update_adv_data(&req, cp->instance); - __hci_req_update_scan_rsp_data(&req, cp->instance); - __hci_req_enable_ext_advertising(&req, cp->instance); - - } else { - /* If using software rotation, determine next instance to use */ - - if (hdev->cur_adv_instance == cp->instance) { - /* If the currently advertised instance is being changed - * then cancel the current advertising and schedule the - * next instance. If there is only one instance then the - * overridden advertising data will be visible right - * away - */ - cancel_adv_timeout(hdev); - - next_instance = hci_get_next_instance(hdev, - cp->instance); - if (next_instance) - schedule_instance = next_instance->instance; - } else if (!hdev->adv_instance_timeout) { - /* Immediately advertise the new instance if no other - * instance is currently being advertised. 
- */ - schedule_instance = cp->instance; - } + /* If using software rotation, determine next instance to use */ + if (hdev->cur_adv_instance == cp->instance) { + /* If the currently advertised instance is being changed + * then cancel the current advertising and schedule the + * next instance. If there is only one instance then the + * overridden advertising data will be visible right + * away + */ + cancel_adv_timeout(hdev); - /* If the HCI_ADVERTISING flag is set or there is no instance to - * be advertised then we have no HCI communication to make. - * Simply return. + next_instance = hci_get_next_instance(hdev, cp->instance); + if (next_instance) + schedule_instance = next_instance->instance; + } else if (!hdev->adv_instance_timeout) { + /* Immediately advertise the new instance if no other + * instance is currently being advertised. */ - if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || - !schedule_instance) { - if (adv_instance->pending) { - mgmt_advertising_added(sk, hdev, cp->instance); - adv_instance->pending = false; - } - rp.instance = cp->instance; - err = mgmt_cmd_complete(sk, hdev->id, - MGMT_OP_ADD_EXT_ADV_DATA, - MGMT_STATUS_SUCCESS, &rp, - sizeof(rp)); - goto unlock; - } + schedule_instance = cp->instance; + } - err = __hci_req_schedule_adv_instance(&req, schedule_instance, - true); + /* If the HCI_ADVERTISING flag is set or there is no instance to + * be advertised then we have no HCI communication to make. + * Simply return. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) { + if (adv_instance->pending) { + mgmt_advertising_added(sk, hdev, cp->instance); + adv_instance->pending = false; + } + rp.instance = cp->instance; + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data, + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto clear_new_instance; } - if (!err) - err = hci_req_run(&req, add_advertising_complete); - + err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd, + add_ext_adv_data_complete); if (err < 0) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, - MGMT_STATUS_FAILED); - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); goto clear_new_instance; } @@ -8408,54 +8452,53 @@ unlock: return err; } -static void remove_advertising_complete(struct hci_dev *hdev, u8 status, - u16 opcode) +static void remove_advertising_complete(struct hci_dev *hdev, void *data, + int err) { - struct mgmt_pending_cmd *cmd; - struct mgmt_cp_remove_advertising *cp; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_remove_advertising *cp = cmd->param; struct mgmt_rp_remove_advertising rp; - bt_dev_dbg(hdev, "status %u", status); + bt_dev_dbg(hdev, "err %d", err); - hci_dev_lock(hdev); + memset(&rp, 0, sizeof(rp)); + rp.instance = cp->instance; - /* A failure status here only means that we failed to disable - * advertising. Otherwise, the advertising instance has been removed, - * so report success. 
- */ - cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev); - if (!cmd) - goto unlock; + if (err) + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err)); + else + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); - cp = cmd->param; - rp.instance = cp->instance; + mgmt_pending_free(cmd); +} - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS, - &rp, sizeof(rp)); - mgmt_pending_remove(cmd); +static int remove_advertising_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_remove_advertising *cp = cmd->param; + int err; -unlock: - hci_dev_unlock(hdev); + err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true); + if (err) + return err; + + if (list_empty(&hdev->adv_instances)) + err = hci_disable_advertising_sync(hdev); + + return err; } static int remove_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_remove_advertising *cp = data; - struct mgmt_rp_remove_advertising rp; struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); - /* Enabling the experimental LL Privay support disables support for - * advertising. - */ - if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, - MGMT_STATUS_NOT_SUPPORTED); - hci_dev_lock(hdev); if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) { @@ -8479,44 +8522,17 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev, goto unlock; } - hci_req_init(&req, hdev); - - /* If we use extended advertising, instance is disabled and removed */ - if (ext_adv_capable(hdev)) { - __hci_req_disable_ext_adv_instance(&req, cp->instance); - __hci_req_remove_ext_adv_instance(&req, cp->instance); - } - - hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true); - - if (list_empty(&hdev->adv_instances)) - __hci_req_disable_advertising(&req); - - /* If no HCI commands have been collected so far or the HCI_ADVERTISING - * flag is set or the device isn't powered then we have no HCI - * communication to make. Simply return. - */ - if (skb_queue_empty(&req.cmd_q) || - !hdev_is_powered(hdev) || - hci_dev_test_flag(hdev, HCI_ADVERTISING)) { - hci_req_purge(&req); - rp.instance = cp->instance; - err = mgmt_cmd_complete(sk, hdev->id, - MGMT_OP_REMOVE_ADVERTISING, - MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); - goto unlock; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data, + cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto unlock; } - err = hci_req_run(&req, remove_advertising_complete); + err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd, + remove_advertising_complete); if (err < 0) - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); @@ -8758,31 +8774,6 @@ void mgmt_index_removed(struct hci_dev *hdev) HCI_MGMT_EXT_INDEX_EVENTS); } -/* This function requires the caller holds hdev->lock */ -static void restart_le_actions(struct hci_dev *hdev) -{ - struct hci_conn_params *p; - - list_for_each_entry(p, &hdev->le_conn_params, list) { - /* Needed for AUTO_OFF case where might not "really" - * have been powered off. 
- */ - list_del_init(&p->action); - - switch (p->auto_connect) { - case HCI_AUTO_CONN_DIRECT: - case HCI_AUTO_CONN_ALWAYS: - list_add(&p->action, &hdev->pend_le_conns); - break; - case HCI_AUTO_CONN_REPORT: - list_add(&p->action, &hdev->pend_le_reports); - break; - default: - break; - } - } -} - void mgmt_power_on(struct hci_dev *hdev, int err) { struct cmd_lookup match = { NULL, hdev }; @@ -8793,7 +8784,7 @@ void mgmt_power_on(struct hci_dev *hdev, int err) if (!err) { restart_le_actions(hdev); - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); } mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); @@ -9349,74 +9340,6 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) sock_put(match.sk); } -static void clear_eir(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_eir cp; - - if (!lmp_ext_inq_capable(hdev)) - return; - - memset(hdev->eir, 0, sizeof(hdev->eir)); - - memset(&cp, 0, sizeof(cp)); - - hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); -} - -void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) -{ - struct cmd_lookup match = { NULL, hdev }; - struct hci_request req; - bool changed = false; - - if (status) { - u8 mgmt_err = mgmt_status(status); - - if (enable && hci_dev_test_and_clear_flag(hdev, - HCI_SSP_ENABLED)) { - hci_dev_clear_flag(hdev, HCI_HS_ENABLED); - new_settings(hdev, NULL); - } - - mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, - &mgmt_err); - return; - } - - if (enable) { - changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); - } else { - changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); - if (!changed) - changed = hci_dev_test_and_clear_flag(hdev, - HCI_HS_ENABLED); - else - hci_dev_clear_flag(hdev, HCI_HS_ENABLED); - } - - mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); - - if (changed) - new_settings(hdev, match.sk); - - if (match.sk) - sock_put(match.sk); - - hci_req_init(&req, hdev); - - if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { - if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) - hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE, - sizeof(enable), &enable); - __hci_req_update_eir(&req); - } else { - clear_eir(&req); - } - - hci_req_run(&req, NULL); -} - static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data) { struct cmd_lookup *match = data; diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c index 0d0a6d77b9e8..83875f2a0604 100644 --- a/net/bluetooth/mgmt_util.c +++ b/net/bluetooth/mgmt_util.c @@ -227,7 +227,7 @@ void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, } } -struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, +struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, struct hci_dev *hdev, void *data, u16 len) { @@ -251,6 +251,19 @@ struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, cmd->sk = sk; sock_hold(sk); + return cmd; +} + +struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, + struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_pending_cmd *cmd; + + cmd = mgmt_pending_new(sk, opcode, hdev, data, len); + if (!cmd) + return NULL; + list_add(&cmd->list, &hdev->mgmt_pending); return cmd; diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h index 6559f189213c..63b965eaaaac 100644 --- a/net/bluetooth/mgmt_util.h +++ b/net/bluetooth/mgmt_util.h @@ -27,6 +27,7 @@ struct mgmt_pending_cmd { void *param; size_t param_len; struct sock *sk; + struct sk_buff *skb; void 
*user_data; int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status); }; @@ -49,5 +50,8 @@ void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, struct hci_dev *hdev, void *data, u16 len); +struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, + struct hci_dev *hdev, + void *data, u16 len); void mgmt_pending_free(struct mgmt_pending_cmd *cmd); void mgmt_pending_remove(struct mgmt_pending_cmd *cmd); diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index 255cffa554ee..1122097e1e49 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -93,7 +93,7 @@ struct msft_data { struct list_head handle_map; __u16 pending_add_handle; __u16 pending_remove_handle; - __u8 reregistering; + __u8 resuming; __u8 suspending; __u8 filter_enabled; }; @@ -156,7 +156,6 @@ failed: return false; } -/* This function requires the caller holds hdev->lock */ static void reregister_monitor(struct hci_dev *hdev, int handle) { struct adv_monitor *monitor; @@ -166,9 +165,9 @@ static void reregister_monitor(struct hci_dev *hdev, int handle) while (1) { monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); if (!monitor) { - /* All monitors have been reregistered */ - msft->reregistering = false; - hci_update_background_scan(hdev); + /* All monitors have been resumed */ + msft->resuming = false; + hci_update_passive_scan(hdev); return; } @@ -185,67 +184,317 @@ static void reregister_monitor(struct hci_dev *hdev, int handle) } } -/* This function requires the caller holds hdev->lock */ -static void remove_monitor_on_suspend(struct hci_dev *hdev, int handle) +/* is_mgmt = true matches the handle exposed to userspace via mgmt. + * is_mgmt = false matches the handle used by the msft controller. 
+ * This function requires the caller holds hdev->lock + */ +static struct msft_monitor_advertisement_handle_data *msft_find_handle_data + (struct hci_dev *hdev, u16 handle, bool is_mgmt) +{ + struct msft_monitor_advertisement_handle_data *entry; + struct msft_data *msft = hdev->msft_data; + + list_for_each_entry(entry, &msft->handle_map, list) { + if (is_mgmt && entry->mgmt_handle == handle) + return entry; + if (!is_mgmt && entry->msft_handle == handle) + return entry; + } + + return NULL; +} + +static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev, + u8 status, u16 opcode, + struct sk_buff *skb) +{ + struct msft_rp_le_monitor_advertisement *rp; + struct adv_monitor *monitor; + struct msft_monitor_advertisement_handle_data *handle_data; + struct msft_data *msft = hdev->msft_data; + + hci_dev_lock(hdev); + + monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle); + if (!monitor) { + bt_dev_err(hdev, "msft add advmon: monitor %u is not found!", + msft->pending_add_handle); + status = HCI_ERROR_UNSPECIFIED; + goto unlock; + } + + if (status) + goto unlock; + + rp = (struct msft_rp_le_monitor_advertisement *)skb->data; + if (skb->len < sizeof(*rp)) { + status = HCI_ERROR_UNSPECIFIED; + goto unlock; + } + + handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL); + if (!handle_data) { + status = HCI_ERROR_UNSPECIFIED; + goto unlock; + } + + handle_data->mgmt_handle = monitor->handle; + handle_data->msft_handle = rp->handle; + INIT_LIST_HEAD(&handle_data->list); + list_add(&handle_data->list, &msft->handle_map); + + monitor->state = ADV_MONITOR_STATE_OFFLOADED; + +unlock: + if (status && monitor) + hci_free_adv_monitor(hdev, monitor); + + hci_dev_unlock(hdev); + + if (!msft->resuming) + hci_add_adv_patterns_monitor_complete(hdev, status); +} + +static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, + u8 status, u16 opcode, + struct sk_buff *skb) { + struct msft_cp_le_cancel_monitor_advertisement *cp; + struct msft_rp_le_cancel_monitor_advertisement *rp; struct adv_monitor *monitor; + struct msft_monitor_advertisement_handle_data *handle_data; struct msft_data *msft = hdev->msft_data; int err; + bool pending; - while (1) { - monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); - if (!monitor) { - /* All monitors have been removed */ - msft->suspending = false; - hci_update_background_scan(hdev); + if (status) + goto done; + + rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data; + if (skb->len < sizeof(*rp)) { + status = HCI_ERROR_UNSPECIFIED; + goto done; + } + + hci_dev_lock(hdev); + + cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); + handle_data = msft_find_handle_data(hdev, cp->handle, false); + + if (handle_data) { + monitor = idr_find(&hdev->adv_monitors_idr, + handle_data->mgmt_handle); + + if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) + monitor->state = ADV_MONITOR_STATE_REGISTERED; + + /* Do not free the monitor if it is being removed due to + * suspend. It will be re-monitored on resume. + */ + if (monitor && !msft->suspending) + hci_free_adv_monitor(hdev, monitor); + + list_del(&handle_data->list); + kfree(handle_data); + } + + /* If remove all monitors is required, we need to continue the process + * here because the earlier it was paused when waiting for the + * response from controller. 
+ */ + if (msft->pending_remove_handle == 0) { + pending = hci_remove_all_adv_monitor(hdev, &err); + if (pending) { + hci_dev_unlock(hdev); return; } - msft->pending_remove_handle = (u16)handle; - err = __msft_remove_monitor(hdev, monitor, handle); + if (err) + status = HCI_ERROR_UNSPECIFIED; + } - /* If success, return and wait for monitor removed callback */ - if (!err) - return; + hci_dev_unlock(hdev); + +done: + if (!msft->suspending) + hci_remove_adv_monitor_complete(hdev, status); +} + +static int msft_remove_monitor_sync(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + struct msft_cp_le_cancel_monitor_advertisement cp; + struct msft_monitor_advertisement_handle_data *handle_data; + struct sk_buff *skb; + u8 status; + + handle_data = msft_find_handle_data(hdev, monitor->handle, true); + + /* If no matched handle, just remove without telling controller */ + if (!handle_data) + return -ENOENT; + + cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT; + cp.handle = handle_data->msft_handle; + + skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + status = skb->data[0]; + skb_pull(skb, 1); + + msft_le_cancel_monitor_advertisement_cb(hdev, status, hdev->msft_opcode, + skb); + + return status; +} + +/* This function requires the caller holds hci_req_sync_lock */ +int msft_suspend_sync(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + struct adv_monitor *monitor; + int handle = 0; + + if (!msft || !msft_monitor_supported(hdev)) + return 0; + + msft->suspending = true; + + while (1) { + monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); + if (!monitor) + break; + + msft_remove_monitor_sync(hdev, monitor); - /* Otherwise free the monitor and keep removing */ - hci_free_adv_monitor(hdev, monitor); handle++; } + + /* All monitors have been removed */ + msft->suspending = false; + + return 0; } -/* This function requires the caller holds hdev->lock */ -void msft_suspend(struct hci_dev *hdev) +static bool msft_monitor_rssi_valid(struct adv_monitor *monitor) { - struct msft_data *msft = hdev->msft_data; + struct adv_rssi_thresholds *r = &monitor->rssi; - if (!msft) - return; + if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || + r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX || + r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || + r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX) + return false; - if (msft_monitor_supported(hdev)) { - msft->suspending = true; - /* Quitely remove all monitors on suspend to avoid waking up - * the system. - */ - remove_monitor_on_suspend(hdev, 0); + /* High_threshold_timeout is not supported, + * once high_threshold is reached, events are immediately reported. 
+ */ + if (r->high_threshold_timeout != 0) + return false; + + if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX) + return false; + + /* Sampling period from 0x00 to 0xFF are all allowed */ + return true; +} + +static bool msft_monitor_pattern_valid(struct adv_monitor *monitor) +{ + return msft_monitor_rssi_valid(monitor); + /* No additional check needed for pattern-based monitor */ +} + +static int msft_add_monitor_sync(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + struct msft_cp_le_monitor_advertisement *cp; + struct msft_le_monitor_advertisement_pattern_data *pattern_data; + struct msft_le_monitor_advertisement_pattern *pattern; + struct adv_pattern *entry; + size_t total_size = sizeof(*cp) + sizeof(*pattern_data); + ptrdiff_t offset = 0; + u8 pattern_count = 0; + struct sk_buff *skb; + u8 status; + + if (!msft_monitor_pattern_valid(monitor)) + return -EINVAL; + + list_for_each_entry(entry, &monitor->patterns, list) { + pattern_count++; + total_size += sizeof(*pattern) + entry->length; } + + cp = kmalloc(total_size, GFP_KERNEL); + if (!cp) + return -ENOMEM; + + cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT; + cp->rssi_high = monitor->rssi.high_threshold; + cp->rssi_low = monitor->rssi.low_threshold; + cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout; + cp->rssi_sampling_period = monitor->rssi.sampling_period; + + cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN; + + pattern_data = (void *)cp->data; + pattern_data->count = pattern_count; + + list_for_each_entry(entry, &monitor->patterns, list) { + pattern = (void *)(pattern_data->data + offset); + /* the length also includes data_type and offset */ + pattern->length = entry->length + 2; + pattern->data_type = entry->ad_type; + pattern->start_byte = entry->offset; + memcpy(pattern->pattern, entry->value, entry->length); + offset += sizeof(*pattern) + entry->length; + } + + skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp, + HCI_CMD_TIMEOUT); + kfree(cp); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + status = skb->data[0]; + skb_pull(skb, 1); + + msft_le_monitor_advertisement_cb(hdev, status, hdev->msft_opcode, skb); + + return status; } -/* This function requires the caller holds hdev->lock */ -void msft_resume(struct hci_dev *hdev) +/* This function requires the caller holds hci_req_sync_lock */ +int msft_resume_sync(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; + struct adv_monitor *monitor; + int handle = 0; - if (!msft) - return; + if (!msft || !msft_monitor_supported(hdev)) + return 0; - if (msft_monitor_supported(hdev)) { - msft->reregistering = true; - /* Monitors are removed on suspend, so we need to add all - * monitors on resume. - */ - reregister_monitor(hdev, 0); + msft->resuming = true; + + while (1) { + monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); + if (!monitor) + break; + + msft_add_monitor_sync(hdev, monitor); + + handle++; } + + /* All monitors have been resumed */ + msft->resuming = false; + + return 0; } void msft_do_open(struct hci_dev *hdev) @@ -275,7 +524,7 @@ void msft_do_open(struct hci_dev *hdev) } if (msft_monitor_supported(hdev)) { - msft->reregistering = true; + msft->resuming = true; msft_set_filter_enable(hdev, true); /* Monitors get removed on power off, so we need to explicitly * tell the controller to re-monitor. @@ -381,151 +630,6 @@ __u64 msft_get_features(struct hci_dev *hdev) return msft ? msft->features : 0; } -/* is_mgmt = true matches the handle exposed to userspace via mgmt. 
- * is_mgmt = false matches the handle used by the msft controller. - * This function requires the caller holds hdev->lock - */ -static struct msft_monitor_advertisement_handle_data *msft_find_handle_data - (struct hci_dev *hdev, u16 handle, bool is_mgmt) -{ - struct msft_monitor_advertisement_handle_data *entry; - struct msft_data *msft = hdev->msft_data; - - list_for_each_entry(entry, &msft->handle_map, list) { - if (is_mgmt && entry->mgmt_handle == handle) - return entry; - if (!is_mgmt && entry->msft_handle == handle) - return entry; - } - - return NULL; -} - -static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev, - u8 status, u16 opcode, - struct sk_buff *skb) -{ - struct msft_rp_le_monitor_advertisement *rp; - struct adv_monitor *monitor; - struct msft_monitor_advertisement_handle_data *handle_data; - struct msft_data *msft = hdev->msft_data; - - hci_dev_lock(hdev); - - monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle); - if (!monitor) { - bt_dev_err(hdev, "msft add advmon: monitor %u is not found!", - msft->pending_add_handle); - status = HCI_ERROR_UNSPECIFIED; - goto unlock; - } - - if (status) - goto unlock; - - rp = (struct msft_rp_le_monitor_advertisement *)skb->data; - if (skb->len < sizeof(*rp)) { - status = HCI_ERROR_UNSPECIFIED; - goto unlock; - } - - handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL); - if (!handle_data) { - status = HCI_ERROR_UNSPECIFIED; - goto unlock; - } - - handle_data->mgmt_handle = monitor->handle; - handle_data->msft_handle = rp->handle; - INIT_LIST_HEAD(&handle_data->list); - list_add(&handle_data->list, &msft->handle_map); - - monitor->state = ADV_MONITOR_STATE_OFFLOADED; - -unlock: - if (status && monitor) - hci_free_adv_monitor(hdev, monitor); - - /* If in restart/reregister sequence, keep registering. */ - if (msft->reregistering) - reregister_monitor(hdev, msft->pending_add_handle + 1); - - hci_dev_unlock(hdev); - - if (!msft->reregistering) - hci_add_adv_patterns_monitor_complete(hdev, status); -} - -static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, - u8 status, u16 opcode, - struct sk_buff *skb) -{ - struct msft_cp_le_cancel_monitor_advertisement *cp; - struct msft_rp_le_cancel_monitor_advertisement *rp; - struct adv_monitor *monitor; - struct msft_monitor_advertisement_handle_data *handle_data; - struct msft_data *msft = hdev->msft_data; - int err; - bool pending; - - if (status) - goto done; - - rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data; - if (skb->len < sizeof(*rp)) { - status = HCI_ERROR_UNSPECIFIED; - goto done; - } - - hci_dev_lock(hdev); - - cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); - handle_data = msft_find_handle_data(hdev, cp->handle, false); - - if (handle_data) { - monitor = idr_find(&hdev->adv_monitors_idr, - handle_data->mgmt_handle); - - if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) - monitor->state = ADV_MONITOR_STATE_REGISTERED; - - /* Do not free the monitor if it is being removed due to - * suspend. It will be re-monitored on resume. - */ - if (monitor && !msft->suspending) - hci_free_adv_monitor(hdev, monitor); - - list_del(&handle_data->list); - kfree(handle_data); - } - - /* If in suspend/remove sequence, keep removing. */ - if (msft->suspending) - remove_monitor_on_suspend(hdev, - msft->pending_remove_handle + 1); - - /* If remove all monitors is required, we need to continue the process - * here because the earlier it was paused when waiting for the - * response from controller. 
- */ - if (msft->pending_remove_handle == 0) { - pending = hci_remove_all_adv_monitor(hdev, &err); - if (pending) { - hci_dev_unlock(hdev); - return; - } - - if (err) - status = HCI_ERROR_UNSPECIFIED; - } - - hci_dev_unlock(hdev); - -done: - if (!msft->suspending) - hci_remove_adv_monitor_complete(hdev, status); -} - static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) @@ -560,35 +664,6 @@ static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static bool msft_monitor_rssi_valid(struct adv_monitor *monitor) -{ - struct adv_rssi_thresholds *r = &monitor->rssi; - - if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || - r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX || - r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || - r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX) - return false; - - /* High_threshold_timeout is not supported, - * once high_threshold is reached, events are immediately reported. - */ - if (r->high_threshold_timeout != 0) - return false; - - if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX) - return false; - - /* Sampling period from 0x00 to 0xFF are all allowed */ - return true; -} - -static bool msft_monitor_pattern_valid(struct adv_monitor *monitor) -{ - return msft_monitor_rssi_valid(monitor); - /* No additional check needed for pattern-based monitor */ -} - /* This function requires the caller holds hdev->lock */ static int __msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) @@ -656,7 +731,7 @@ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) if (!msft) return -EOPNOTSUPP; - if (msft->reregistering || msft->suspending) + if (msft->resuming || msft->suspending) return -EBUSY; return __msft_add_monitor_pattern(hdev, monitor); @@ -700,7 +775,7 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, if (!msft) return -EOPNOTSUPP; - if (msft->reregistering || msft->suspending) + if (msft->resuming || msft->suspending) return -EBUSY; return __msft_remove_monitor(hdev, monitor, handle); diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h index 59c6e081c789..b59b63dc0ea8 100644 --- a/net/bluetooth/msft.h +++ b/net/bluetooth/msft.h @@ -24,8 +24,8 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, u16 handle); void msft_req_add_set_filter_enable(struct hci_request *req, bool enable); int msft_set_filter_enable(struct hci_dev *hdev, bool enable); -void msft_suspend(struct hci_dev *hdev); -void msft_resume(struct hci_dev *hdev); +int msft_suspend_sync(struct hci_dev *hdev); +int msft_resume_sync(struct hci_dev *hdev); bool msft_curve_validity(struct hci_dev *hdev); #else @@ -61,8 +61,15 @@ static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable) return -EOPNOTSUPP; } -static inline void msft_suspend(struct hci_dev *hdev) {} -static inline void msft_resume(struct hci_dev *hdev) {} +static inline int msft_suspend_sync(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int msft_resume_sync(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} static inline bool msft_curve_validity(struct hci_dev *hdev) { diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index c1183fef1f21..a52ad81596b7 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -274,7 +274,7 @@ static void destroy_nbp(struct net_bridge_port *p) p->br = NULL; p->dev = NULL; - dev_put(dev); + dev_put_track(dev, &p->dev_tracker); 
kobject_put(&p->kobj); } @@ -397,10 +397,10 @@ static int find_portno(struct net_bridge *br) if (!inuse) return -ENOMEM; - set_bit(0, inuse); /* zero is reserved */ - list_for_each_entry(p, &br->port_list, list) { - set_bit(p->port_no, inuse); - } + __set_bit(0, inuse); /* zero is reserved */ + list_for_each_entry(p, &br->port_list, list) + __set_bit(p->port_no, inuse); + index = find_first_zero_bit(inuse, BR_MAX_PORTS); bitmap_free(inuse); @@ -423,7 +423,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, return ERR_PTR(-ENOMEM); p->br = br; - dev_hold(dev); + dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL); p->dev = dev; p->path_cost = port_cost(dev); p->priority = 0x8000 >> BR_PORT_BITS; @@ -434,7 +434,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, br_stp_port_timer_init(p); err = br_multicast_add_port(p); if (err) { - dev_put(dev); + dev_put_track(dev, &p->dev_tracker); kfree(p); p = ERR_PTR(err); } @@ -525,8 +525,8 @@ static void br_set_gso_limits(struct net_bridge *br) gso_max_size = min(gso_max_size, p->dev->gso_max_size); gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs); } - br->dev->gso_max_size = gso_max_size; - br->dev->gso_max_segs = gso_max_segs; + netif_set_gso_max_size(br->dev, gso_max_size); + netif_set_gso_max_segs(br->dev, gso_max_segs); } /* diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index b5af68c105a8..4fd882686b04 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -743,6 +743,9 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu) mtu = nf_bridge->frag_max_size; + nf_bridge_update_protocol(skb); + nf_bridge_push_encap_header(skb); + if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) { nf_bridge_info_free(skb); return br_dev_queue_push_xmit(net, sk, skb); @@ -760,8 +763,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; - nf_bridge_update_protocol(skb); - data = this_cpu_ptr(&brnf_frag_data_storage); if (skb_vlan_tag_present(skb)) { @@ -789,8 +790,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; - nf_bridge_update_protocol(skb); - data = this_cpu_ptr(&brnf_frag_data_storage); data->encap_size = nf_bridge_encap_header_len(skb); data->size = ETH_HLEN + data->encap_size; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index c0efd697865a..af2b3512d86c 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -344,6 +344,7 @@ struct net_bridge_mdb_entry { struct net_bridge_port { struct net_bridge *br; struct net_device *dev; + netdevice_tracker dev_tracker; struct list_head list; unsigned long flags; diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index d9a89ddd0331..159590d5c2af 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -36,15 +36,14 @@ static ssize_t store_bridge_parm(struct device *d, struct net_bridge *br = to_bridge(d); struct netlink_ext_ack extack = {0}; unsigned long val; - char *endp; int err; if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN)) return -EPERM; - val = simple_strtoul(buf, &endp, 0); - if (endp == buf) - return -EINVAL; + err = kstrtoul(buf, 0, &val); + if (err != 0) + return err; if (!rtnl_trylock()) return restart_syscall(); diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 
49e105e0a447..84ba456a78cc 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -1063,7 +1063,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid, if (br_vlan_delete(br, old_pvid)) br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN); br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN); - set_bit(0, changed); + __set_bit(0, changed); } list_for_each_entry(p, &br->port_list, list) { @@ -1085,7 +1085,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid, if (nbp_vlan_delete(p, old_pvid)) br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN); br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN); - set_bit(p->port_no, changed); + __set_bit(p->port_no, changed); } br->default_pvid = pvid; diff --git a/net/core/Makefile b/net/core/Makefile index 4268846f2f47..a8e4f737692b 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -11,7 +11,9 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o obj-y += dev.o dev_addr_lists.o dst.o netevent.o \ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \ - fib_notifier.o xdp.o flow_offload.o + fib_notifier.o xdp.o flow_offload.o gro.o + +obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o obj-y += net-sysfs.o obj-$(CONFIG_PAGE_POOL) += page_pool.o diff --git a/net/core/dev.c b/net/core/dev.c index c4708e2487fb..644b9c8be3a8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -153,16 +153,10 @@ #include "net-sysfs.h" -#define MAX_GRO_SKBS 8 - -/* This should be increased if a protocol with a bigger head is added. */ -#define GRO_MAX_HEAD (MAX_HEADER + 128) static DEFINE_SPINLOCK(ptype_lock); -static DEFINE_SPINLOCK(offload_lock); struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; struct list_head ptype_all __read_mostly; /* Taps */ -static struct list_head offload_base __read_mostly; static int netif_rx_internal(struct sk_buff *skb); static int call_netdevice_notifiers_info(unsigned long val, @@ -371,12 +365,12 @@ static void list_netdevice(struct net_device *dev) ASSERT_RTNL(); - write_lock_bh(&dev_base_lock); + write_lock(&dev_base_lock); list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); netdev_name_node_add(net, dev->name_node); hlist_add_head_rcu(&dev->index_hlist, dev_index_hash(net, dev->ifindex)); - write_unlock_bh(&dev_base_lock); + write_unlock(&dev_base_lock); dev_base_seq_inc(net); } @@ -389,11 +383,11 @@ static void unlist_netdevice(struct net_device *dev) ASSERT_RTNL(); /* Unlink dev from the device chain */ - write_lock_bh(&dev_base_lock); + write_lock(&dev_base_lock); list_del_rcu(&dev->dev_list); netdev_name_node_del(dev->name_node); hlist_del_rcu(&dev->index_hlist); - write_unlock_bh(&dev_base_lock); + write_unlock(&dev_base_lock); dev_base_seq_inc(dev_net(dev)); } @@ -604,84 +598,6 @@ void dev_remove_pack(struct packet_type *pt) EXPORT_SYMBOL(dev_remove_pack); -/** - * dev_add_offload - register offload handlers - * @po: protocol offload declaration - * - * Add protocol offload handlers to the networking stack. The passed - * &proto_offload is linked into kernel lists and may not be freed until - * it has been removed from the kernel lists. - * - * This call does not sleep therefore it can not - * guarantee all CPU's that are in middle of receiving packets - * will see the new offload handlers (until the next received packet). 
- */ -void dev_add_offload(struct packet_offload *po) -{ - struct packet_offload *elem; - - spin_lock(&offload_lock); - list_for_each_entry(elem, &offload_base, list) { - if (po->priority < elem->priority) - break; - } - list_add_rcu(&po->list, elem->list.prev); - spin_unlock(&offload_lock); -} -EXPORT_SYMBOL(dev_add_offload); - -/** - * __dev_remove_offload - remove offload handler - * @po: packet offload declaration - * - * Remove a protocol offload handler that was previously added to the - * kernel offload handlers by dev_add_offload(). The passed &offload_type - * is removed from the kernel lists and can be freed or reused once this - * function returns. - * - * The packet type might still be in use by receivers - * and must not be freed until after all the CPU's have gone - * through a quiescent state. - */ -static void __dev_remove_offload(struct packet_offload *po) -{ - struct list_head *head = &offload_base; - struct packet_offload *po1; - - spin_lock(&offload_lock); - - list_for_each_entry(po1, head, list) { - if (po == po1) { - list_del_rcu(&po->list); - goto out; - } - } - - pr_warn("dev_remove_offload: %p not found\n", po); -out: - spin_unlock(&offload_lock); -} - -/** - * dev_remove_offload - remove packet offload handler - * @po: packet offload declaration - * - * Remove a packet offload handler that was previously added to the kernel - * offload handlers by dev_add_offload(). The passed &offload_type is - * removed from the kernel lists and can be freed or reused once this - * function returns. - * - * This call sleeps to guarantee that no CPU is looking at the packet - * type after return. - */ -void dev_remove_offload(struct packet_offload *po) -{ - __dev_remove_offload(po); - - synchronize_net(); -} -EXPORT_SYMBOL(dev_remove_offload); - /******************************************************************************* * * Device Interface Subroutines @@ -1272,15 +1188,15 @@ rollback: netdev_adjacent_rename_links(dev, oldname); - write_lock_bh(&dev_base_lock); + write_lock(&dev_base_lock); netdev_name_node_del(dev->name_node); - write_unlock_bh(&dev_base_lock); + write_unlock(&dev_base_lock); synchronize_rcu(); - write_lock_bh(&dev_base_lock); + write_lock(&dev_base_lock); netdev_name_node_add(net, dev->name_node); - write_unlock_bh(&dev_base_lock); + write_unlock(&dev_base_lock); ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); ret = notifier_to_errno(ret); @@ -1461,6 +1377,7 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) int ret; ASSERT_RTNL(); + dev_addr_check(dev); if (!netif_device_present(dev)) { /* may be detached because parent is runtime-suspended */ @@ -3315,40 +3232,6 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) return __vlan_get_protocol(skb, type, depth); } -/** - * skb_mac_gso_segment - mac layer segmentation handler. 
- * @skb: buffer to segment - * @features: features for the output path (see dev->features) - */ -struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, - netdev_features_t features) -{ - struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); - struct packet_offload *ptype; - int vlan_depth = skb->mac_len; - __be16 type = skb_network_protocol(skb, &vlan_depth); - - if (unlikely(!type)) - return ERR_PTR(-EINVAL); - - __skb_pull(skb, vlan_depth); - - rcu_read_lock(); - list_for_each_entry_rcu(ptype, &offload_base, list) { - if (ptype->type == type && ptype->callbacks.gso_segment) { - segs = ptype->callbacks.gso_segment(skb, features); - break; - } - } - rcu_read_unlock(); - - __skb_push(skb, skb->data - skb_mac_header(skb)); - - return segs; -} -EXPORT_SYMBOL(skb_mac_gso_segment); - - /* openvswitch calls this on rx path, so we need a different check. */ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) @@ -3513,7 +3396,7 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb, { u16 gso_segs = skb_shinfo(skb)->gso_segs; - if (gso_segs > dev->gso_max_segs) + if (gso_segs > READ_ONCE(dev->gso_max_segs)) return features & ~NETIF_F_GSO_MASK; if (!skb_shinfo(skb)->gso_type) { @@ -3836,8 +3719,12 @@ no_lock_out: * separate lock before trying to get qdisc main lock. * This permits qdisc->running owner to get the lock more * often and dequeue packets faster. + * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit + * and then other tasks will only enqueue packets. The packets will be + * sent after the qdisc owner is scheduled again. To prevent this + * scenario the task always serialize on the lock. */ - contended = qdisc_is_running(q); + contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT); if (unlikely(contended)) spin_lock(&q->busylock); @@ -4323,8 +4210,6 @@ int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ int dev_rx_weight __read_mostly = 64; int dev_tx_weight __read_mostly = 64; -/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ -int gro_normal_batch __read_mostly = 8; /* Called with irq disabled */ static inline void ____napi_schedule(struct softnet_data *sd, @@ -5667,7 +5552,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb) return ret; } -static void netif_receive_skb_list_internal(struct list_head *head) +void netif_receive_skb_list_internal(struct list_head *head) { struct sk_buff *skb, *next; struct list_head sublist; @@ -5845,550 +5730,6 @@ static void flush_all_backlogs(void) cpus_read_unlock(); } -/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */ -static void gro_normal_list(struct napi_struct *napi) -{ - if (!napi->rx_count) - return; - netif_receive_skb_list_internal(&napi->rx_list); - INIT_LIST_HEAD(&napi->rx_list); - napi->rx_count = 0; -} - -/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, - * pass the whole batch up to the stack. 
- */ -static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs) -{ - list_add_tail(&skb->list, &napi->rx_list); - napi->rx_count += segs; - if (napi->rx_count >= gro_normal_batch) - gro_normal_list(napi); -} - -static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) -{ - struct packet_offload *ptype; - __be16 type = skb->protocol; - struct list_head *head = &offload_base; - int err = -ENOENT; - - BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); - - if (NAPI_GRO_CB(skb)->count == 1) { - skb_shinfo(skb)->gso_size = 0; - goto out; - } - - rcu_read_lock(); - list_for_each_entry_rcu(ptype, head, list) { - if (ptype->type != type || !ptype->callbacks.gro_complete) - continue; - - err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, - ipv6_gro_complete, inet_gro_complete, - skb, 0); - break; - } - rcu_read_unlock(); - - if (err) { - WARN_ON(&ptype->list == head); - kfree_skb(skb); - return; - } - -out: - gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); -} - -static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, - bool flush_old) -{ - struct list_head *head = &napi->gro_hash[index].list; - struct sk_buff *skb, *p; - - list_for_each_entry_safe_reverse(skb, p, head, list) { - if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) - return; - skb_list_del_init(skb); - napi_gro_complete(napi, skb); - napi->gro_hash[index].count--; - } - - if (!napi->gro_hash[index].count) - __clear_bit(index, &napi->gro_bitmask); -} - -/* napi->gro_hash[].list contains packets ordered by age. - * youngest packets at the head of it. - * Complete skbs in reverse order to reduce latencies. - */ -void napi_gro_flush(struct napi_struct *napi, bool flush_old) -{ - unsigned long bitmask = napi->gro_bitmask; - unsigned int i, base = ~0U; - - while ((i = ffs(bitmask)) != 0) { - bitmask >>= i; - base += i; - __napi_gro_flush_chain(napi, base, flush_old); - } -} -EXPORT_SYMBOL(napi_gro_flush); - -static void gro_list_prepare(const struct list_head *head, - const struct sk_buff *skb) -{ - unsigned int maclen = skb->dev->hard_header_len; - u32 hash = skb_get_hash_raw(skb); - struct sk_buff *p; - - list_for_each_entry(p, head, list) { - unsigned long diffs; - - NAPI_GRO_CB(p)->flush = 0; - - if (hash != skb_get_hash_raw(p)) { - NAPI_GRO_CB(p)->same_flow = 0; - continue; - } - - diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; - diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb); - if (skb_vlan_tag_present(p)) - diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb); - diffs |= skb_metadata_differs(p, skb); - if (maclen == ETH_HLEN) - diffs |= compare_ether_header(skb_mac_header(p), - skb_mac_header(skb)); - else if (!diffs) - diffs = memcmp(skb_mac_header(p), - skb_mac_header(skb), - maclen); - - /* in most common scenarions 'slow_gro' is 0 - * otherwise we are already on some slower paths - * either skip all the infrequent tests altogether or - * avoid trying too hard to skip each of them individually - */ - if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) { -#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - struct tc_skb_ext *skb_ext; - struct tc_skb_ext *p_ext; -#endif - - diffs |= p->sk != skb->sk; - diffs |= skb_metadata_dst_cmp(p, skb); - diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb); - -#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - skb_ext = skb_ext_find(skb, TC_SKB_EXT); - p_ext = skb_ext_find(p, TC_SKB_EXT); - - diffs |= (!!p_ext) ^ (!!skb_ext); - if (!diffs && 
unlikely(skb_ext)) - diffs |= p_ext->chain ^ skb_ext->chain; -#endif - } - - NAPI_GRO_CB(p)->same_flow = !diffs; - } -} - -static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff) -{ - const struct skb_shared_info *pinfo = skb_shinfo(skb); - const skb_frag_t *frag0 = &pinfo->frags[0]; - - NAPI_GRO_CB(skb)->data_offset = 0; - NAPI_GRO_CB(skb)->frag0 = NULL; - NAPI_GRO_CB(skb)->frag0_len = 0; - - if (!skb_headlen(skb) && pinfo->nr_frags && - !PageHighMem(skb_frag_page(frag0)) && - (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) { - NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); - NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, - skb_frag_size(frag0), - skb->end - skb->tail); - } -} - -static void gro_pull_from_frag0(struct sk_buff *skb, int grow) -{ - struct skb_shared_info *pinfo = skb_shinfo(skb); - - BUG_ON(skb->end - skb->tail < grow); - - memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); - - skb->data_len -= grow; - skb->tail += grow; - - skb_frag_off_add(&pinfo->frags[0], grow); - skb_frag_size_sub(&pinfo->frags[0], grow); - - if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { - skb_frag_unref(skb, 0); - memmove(pinfo->frags, pinfo->frags + 1, - --pinfo->nr_frags * sizeof(pinfo->frags[0])); - } -} - -static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) -{ - struct sk_buff *oldest; - - oldest = list_last_entry(head, struct sk_buff, list); - - /* We are called with head length >= MAX_GRO_SKBS, so this is - * impossible. - */ - if (WARN_ON_ONCE(!oldest)) - return; - - /* Do not adjust napi->gro_hash[].count, caller is adding a new - * SKB to the chain. - */ - skb_list_del_init(oldest); - napi_gro_complete(napi, oldest); -} - -static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) -{ - u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); - struct gro_list *gro_list = &napi->gro_hash[bucket]; - struct list_head *head = &offload_base; - struct packet_offload *ptype; - __be16 type = skb->protocol; - struct sk_buff *pp = NULL; - enum gro_result ret; - int same_flow; - int grow; - - if (netif_elide_gro(skb->dev)) - goto normal; - - gro_list_prepare(&gro_list->list, skb); - - rcu_read_lock(); - list_for_each_entry_rcu(ptype, head, list) { - if (ptype->type != type || !ptype->callbacks.gro_receive) - continue; - - skb_set_network_header(skb, skb_gro_offset(skb)); - skb_reset_mac_len(skb); - NAPI_GRO_CB(skb)->same_flow = 0; - NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb); - NAPI_GRO_CB(skb)->free = 0; - NAPI_GRO_CB(skb)->encap_mark = 0; - NAPI_GRO_CB(skb)->recursion_counter = 0; - NAPI_GRO_CB(skb)->is_fou = 0; - NAPI_GRO_CB(skb)->is_atomic = 1; - NAPI_GRO_CB(skb)->gro_remcsum_start = 0; - - /* Setup for GRO checksum validation */ - switch (skb->ip_summed) { - case CHECKSUM_COMPLETE: - NAPI_GRO_CB(skb)->csum = skb->csum; - NAPI_GRO_CB(skb)->csum_valid = 1; - NAPI_GRO_CB(skb)->csum_cnt = 0; - break; - case CHECKSUM_UNNECESSARY: - NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; - NAPI_GRO_CB(skb)->csum_valid = 0; - break; - default: - NAPI_GRO_CB(skb)->csum_cnt = 0; - NAPI_GRO_CB(skb)->csum_valid = 0; - } - - pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, - ipv6_gro_receive, inet_gro_receive, - &gro_list->list, skb); - break; - } - rcu_read_unlock(); - - if (&ptype->list == head) - goto normal; - - if (PTR_ERR(pp) == -EINPROGRESS) { - ret = GRO_CONSUMED; - goto ok; - } - - same_flow = NAPI_GRO_CB(skb)->same_flow; - ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; - - if (pp) { - skb_list_del_init(pp); - napi_gro_complete(napi, pp); - gro_list->count--; - } - - if (same_flow) - goto ok; - - if (NAPI_GRO_CB(skb)->flush) - goto normal; - - if (unlikely(gro_list->count >= MAX_GRO_SKBS)) - gro_flush_oldest(napi, &gro_list->list); - else - gro_list->count++; - - NAPI_GRO_CB(skb)->count = 1; - NAPI_GRO_CB(skb)->age = jiffies; - NAPI_GRO_CB(skb)->last = skb; - skb_shinfo(skb)->gso_size = skb_gro_len(skb); - list_add(&skb->list, &gro_list->list); - ret = GRO_HELD; - -pull: - grow = skb_gro_offset(skb) - skb_headlen(skb); - if (grow > 0) - gro_pull_from_frag0(skb, grow); -ok: - if (gro_list->count) { - if (!test_bit(bucket, &napi->gro_bitmask)) - __set_bit(bucket, &napi->gro_bitmask); - } else if (test_bit(bucket, &napi->gro_bitmask)) { - __clear_bit(bucket, &napi->gro_bitmask); - } - - return ret; - -normal: - ret = GRO_NORMAL; - goto pull; -} - -struct packet_offload *gro_find_receive_by_type(__be16 type) -{ - struct list_head *offload_head = &offload_base; - struct packet_offload *ptype; - - list_for_each_entry_rcu(ptype, offload_head, list) { - if (ptype->type != type || !ptype->callbacks.gro_receive) - continue; - return ptype; - } - return NULL; -} -EXPORT_SYMBOL(gro_find_receive_by_type); - -struct packet_offload *gro_find_complete_by_type(__be16 type) -{ - struct list_head *offload_head = &offload_base; - struct packet_offload *ptype; - - list_for_each_entry_rcu(ptype, offload_head, list) { - if (ptype->type != type || !ptype->callbacks.gro_complete) - continue; - return ptype; - } - return NULL; -} -EXPORT_SYMBOL(gro_find_complete_by_type); - -static gro_result_t napi_skb_finish(struct napi_struct *napi, - struct sk_buff *skb, - gro_result_t ret) -{ - switch (ret) { - case GRO_NORMAL: - gro_normal_one(napi, skb, 1); - break; - - case GRO_MERGED_FREE: - if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) - napi_skb_free_stolen_head(skb); - else if (skb->fclone != SKB_FCLONE_UNAVAILABLE) - __kfree_skb(skb); - else - __kfree_skb_defer(skb); - break; - - case GRO_HELD: - case GRO_MERGED: - case GRO_CONSUMED: - break; - } - - return ret; -} - -gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) -{ - gro_result_t ret; - - skb_mark_napi_id(skb, napi); - trace_napi_gro_receive_entry(skb); - - skb_gro_reset_offset(skb, 0); - - ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb)); - trace_napi_gro_receive_exit(ret); - - return ret; -} -EXPORT_SYMBOL(napi_gro_receive); - -static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) -{ - if (unlikely(skb->pfmemalloc)) { - consume_skb(skb); - return; - } - __skb_pull(skb, skb_headlen(skb)); - /* restore the reserve we had after netdev_alloc_skb_ip_align() */ - skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); - __vlan_hwaccel_clear_tag(skb); - skb->dev = napi->dev; - skb->skb_iif = 0; - - /* eth_type_trans() assumes pkt_type is PACKET_HOST */ - skb->pkt_type = PACKET_HOST; - - skb->encapsulation = 0; - skb_shinfo(skb)->gso_type = 0; - skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); - if (unlikely(skb->slow_gro)) { - skb_orphan(skb); - skb_ext_reset(skb); - nf_reset_ct(skb); - skb->slow_gro = 0; - } - - napi->skb = skb; -} - -struct sk_buff *napi_get_frags(struct napi_struct *napi) -{ - struct sk_buff *skb = napi->skb; - - if (!skb) { - skb = napi_alloc_skb(napi, GRO_MAX_HEAD); - if (skb) { - napi->skb = skb; - skb_mark_napi_id(skb, napi); - } - } - return skb; -} -EXPORT_SYMBOL(napi_get_frags); - -static 
gro_result_t napi_frags_finish(struct napi_struct *napi, - struct sk_buff *skb, - gro_result_t ret) -{ - switch (ret) { - case GRO_NORMAL: - case GRO_HELD: - __skb_push(skb, ETH_HLEN); - skb->protocol = eth_type_trans(skb, skb->dev); - if (ret == GRO_NORMAL) - gro_normal_one(napi, skb, 1); - break; - - case GRO_MERGED_FREE: - if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) - napi_skb_free_stolen_head(skb); - else - napi_reuse_skb(napi, skb); - break; - - case GRO_MERGED: - case GRO_CONSUMED: - break; - } - - return ret; -} - -/* Upper GRO stack assumes network header starts at gro_offset=0 - * Drivers could call both napi_gro_frags() and napi_gro_receive() - * We copy ethernet header into skb->data to have a common layout. - */ -static struct sk_buff *napi_frags_skb(struct napi_struct *napi) -{ - struct sk_buff *skb = napi->skb; - const struct ethhdr *eth; - unsigned int hlen = sizeof(*eth); - - napi->skb = NULL; - - skb_reset_mac_header(skb); - skb_gro_reset_offset(skb, hlen); - - if (unlikely(skb_gro_header_hard(skb, hlen))) { - eth = skb_gro_header_slow(skb, hlen, 0); - if (unlikely(!eth)) { - net_warn_ratelimited("%s: dropping impossible skb from %s\n", - __func__, napi->dev->name); - napi_reuse_skb(napi, skb); - return NULL; - } - } else { - eth = (const struct ethhdr *)skb->data; - gro_pull_from_frag0(skb, hlen); - NAPI_GRO_CB(skb)->frag0 += hlen; - NAPI_GRO_CB(skb)->frag0_len -= hlen; - } - __skb_pull(skb, hlen); - - /* - * This works because the only protocols we care about don't require - * special handling. - * We'll fix it up properly in napi_frags_finish() - */ - skb->protocol = eth->h_proto; - - return skb; -} - -gro_result_t napi_gro_frags(struct napi_struct *napi) -{ - gro_result_t ret; - struct sk_buff *skb = napi_frags_skb(napi); - - trace_napi_gro_frags_entry(skb); - - ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); - trace_napi_gro_frags_exit(ret); - - return ret; -} -EXPORT_SYMBOL(napi_gro_frags); - -/* Compute the checksum from gro_offset and return the folded value - * after adding in any pseudo checksum. - */ -__sum16 __skb_gro_checksum_complete(struct sk_buff *skb) -{ - __wsum wsum; - __sum16 sum; - - wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); - - /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ - sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); - /* See comments in __skb_checksum_complete(). 
*/ - if (likely(!sum)) { - if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && - !skb->csum_complete_sw) - netdev_rx_csum_fault(skb->dev, skb); - } - - NAPI_GRO_CB(skb)->csum = wsum; - NAPI_GRO_CB(skb)->csum_valid = 1; - - return sum; -} -EXPORT_SYMBOL(__skb_gro_checksum_complete); - static void net_rps_send_ipi(struct softnet_data *remsd) { #ifdef CONFIG_RPS @@ -7200,6 +6541,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) struct netdev_adjacent { struct net_device *dev; + netdevice_tracker dev_tracker; /* upper master flag, there can only be one master device per list */ bool master; @@ -7964,7 +7306,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, adj->ref_nr = 1; adj->private = private; adj->ignore = false; - dev_hold(adj_dev); + dev_hold_track(adj_dev, &adj->dev_tracker, GFP_KERNEL); pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); @@ -7993,8 +7335,8 @@ remove_symlinks: if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); free_adj: + dev_put_track(adj_dev, &adj->dev_tracker); kfree(adj); - dev_put(adj_dev); return ret; } @@ -8035,7 +7377,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev, list_del_rcu(&adj->list); pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", adj_dev->name, dev->name, adj_dev->name); - dev_put(adj_dev); + dev_put_track(adj_dev, &adj->dev_tracker); kfree_rcu(adj, rcu); } @@ -9224,35 +8566,17 @@ bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) EXPORT_SYMBOL(netdev_port_same_parent_id); /** - * dev_change_proto_down - update protocol port state information + * dev_change_proto_down - set carrier according to proto_down. + * * @dev: device * @proto_down: new value - * - * This info can be used by switch drivers to set the phys state of the - * port. */ int dev_change_proto_down(struct net_device *dev, bool proto_down) { - const struct net_device_ops *ops = dev->netdev_ops; - - if (!ops->ndo_change_proto_down) + if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) return -EOPNOTSUPP; if (!netif_device_present(dev)) return -ENODEV; - return ops->ndo_change_proto_down(dev, proto_down); -} -EXPORT_SYMBOL(dev_change_proto_down); - -/** - * dev_change_proto_down_generic - generic implementation for - * ndo_change_proto_down that sets carrier according to - * proto_down. - * - * @dev: device - * @proto_down: new value - */ -int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) -{ if (proto_down) netif_carrier_off(dev); else @@ -9260,7 +8584,7 @@ int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) dev->proto_down = proto_down; return 0; } -EXPORT_SYMBOL(dev_change_proto_down_generic); +EXPORT_SYMBOL(dev_change_proto_down); /** * dev_change_proto_down_reason - proto down reason @@ -10545,6 +9869,7 @@ static void netdev_wait_allrefs(struct net_device *dev) netdev_unregister_timeout_secs * HZ)) { pr_emerg("unregister_netdevice: waiting for %s to become free. 
Usage count = %d\n", dev->name, refcnt); + ref_tracker_dir_print(&dev->refcnt_tracker, 10); warning_time = jiffies; } } @@ -10835,6 +10160,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev = PTR_ALIGN(p, NETDEV_ALIGN); dev->padded = (char *)dev - (char *)p; + ref_tracker_dir_init(&dev->refcnt_tracker, 128); #ifdef CONFIG_PCPU_DEV_REFCNT dev->pcpu_refcnt = alloc_percpu(int); if (!dev->pcpu_refcnt) @@ -10951,6 +10277,7 @@ void free_netdev(struct net_device *dev) list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) netif_napi_del(p); + ref_tracker_dir_exit(&dev->refcnt_tracker); #ifdef CONFIG_PCPU_DEV_REFCNT free_percpu(dev->pcpu_refcnt); dev->pcpu_refcnt = NULL; @@ -11643,8 +10970,6 @@ static int __init net_dev_init(void) for (i = 0; i < PTYPE_HASH_SIZE; i++) INIT_LIST_HEAD(&ptype_base[i]); - INIT_LIST_HEAD(&offload_base); - if (register_pernet_subsys(&netdev_net_ops)) goto out; diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index f0cb38344126..bead38ca50bd 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -16,6 +16,35 @@ * General list handling functions */ +static int __hw_addr_insert(struct netdev_hw_addr_list *list, + struct netdev_hw_addr *new, int addr_len) +{ + struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL; + struct netdev_hw_addr *ha; + + while (*ins_point) { + int diff; + + ha = rb_entry(*ins_point, struct netdev_hw_addr, node); + diff = memcmp(new->addr, ha->addr, addr_len); + if (diff == 0) + diff = memcmp(&new->type, &ha->type, sizeof(new->type)); + + parent = *ins_point; + if (diff < 0) + ins_point = &parent->rb_left; + else if (diff > 0) + ins_point = &parent->rb_right; + else + return -EEXIST; + } + + rb_link_node_rcu(&new->node, parent, ins_point); + rb_insert_color(&new->node, &list->tree); + + return 0; +} + static struct netdev_hw_addr* __hw_addr_create(const unsigned char *addr, int addr_len, unsigned char addr_type, bool global, bool sync) @@ -50,11 +79,6 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, if (addr_len > MAX_ADDR_LEN) return -EINVAL; - ha = list_first_entry(&list->list, struct netdev_hw_addr, list); - if (ha && !memcmp(addr, ha->addr, addr_len) && - (!addr_type || addr_type == ha->type)) - goto found_it; - while (*ins_point) { int diff; @@ -69,7 +93,6 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, } else if (diff > 0) { ins_point = &parent->rb_right; } else { -found_it: if (exclusive) return -EEXIST; if (global) { @@ -94,16 +117,8 @@ found_it: if (!ha) return -ENOMEM; - /* The first address in dev->dev_addrs is pointed to by dev->dev_addr - * and mutated freely by device drivers and netdev ops, so if we insert - * it into the tree we'll end up with an invalid rbtree. 
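/*
 * Illustrative sketch (not from this patch) of the ref_tracker directory life
 * cycle that the netdev refcount tracking above builds on: the directory is
 * initialised with a quarantine depth, one tracker is allocated per
 * outstanding reference and freed together with it, leftovers can be printed
 * when a leak is suspected, and the directory is torn down last.
 * "struct tracked_object" and the sizes used here are made up for the example.
 */
#include <linux/ref_tracker.h>
#include <linux/gfp.h>

struct tracked_object {
        struct ref_tracker_dir dir;
};

static void tracked_object_demo(struct tracked_object *obj)
{
        struct ref_tracker *tracker = NULL;

        ref_tracker_dir_init(&obj->dir, 16);                /* quarantine depth */
        ref_tracker_alloc(&obj->dir, &tracker, GFP_KERNEL); /* take one reference */
        ref_tracker_free(&obj->dir, &tracker);              /* and drop it again */
        ref_tracker_dir_print(&obj->dir, 10);               /* report anything left */
        ref_tracker_dir_exit(&obj->dir);
}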
- */ - if (list->count > 0) { - rb_link_node(&ha->node, parent, ins_point); - rb_insert_color(&ha->node, &list->tree); - } else { - RB_CLEAR_NODE(&ha->node); - } + rb_link_node(&ha->node, parent, ins_point); + rb_insert_color(&ha->node, &list->tree); list_add_tail_rcu(&ha->list, &list->list); list->count++; @@ -138,8 +153,7 @@ static int __hw_addr_del_entry(struct netdev_hw_addr_list *list, if (--ha->refcount) return 0; - if (!RB_EMPTY_NODE(&ha->node)) - rb_erase(&ha->node, &list->tree); + rb_erase(&ha->node, &list->tree); list_del_rcu(&ha->list); kfree_rcu(ha, rcu_head); @@ -151,18 +165,8 @@ static struct netdev_hw_addr *__hw_addr_lookup(struct netdev_hw_addr_list *list, const unsigned char *addr, int addr_len, unsigned char addr_type) { - struct netdev_hw_addr *ha; struct rb_node *node; - /* The first address isn't inserted into the tree because in the dev->dev_addrs - * list it's the address pointed to by dev->dev_addr which is freely mutated - * in place, so we need to check it separately. - */ - ha = list_first_entry(&list->list, struct netdev_hw_addr, list); - if (ha && !memcmp(addr, ha->addr, addr_len) && - (!addr_type || addr_type == ha->type)) - return ha; - node = list->tree.rb_node; while (node) { @@ -498,6 +502,21 @@ EXPORT_SYMBOL(__hw_addr_init); * Device addresses handling functions */ +/* Check that netdev->dev_addr is not written to directly as this would + * break the rbtree layout. All changes should go thru dev_addr_set() and co. + * Remove this check in mid-2024. + */ +void dev_addr_check(struct net_device *dev) +{ + if (!memcmp(dev->dev_addr, dev->dev_addr_shadow, MAX_ADDR_LEN)) + return; + + netdev_warn(dev, "Current addr: %*ph\n", MAX_ADDR_LEN, dev->dev_addr); + netdev_warn(dev, "Expected addr: %*ph\n", + MAX_ADDR_LEN, dev->dev_addr_shadow); + netdev_WARN(dev, "Incorrect netdev->dev_addr\n"); +} + /** * dev_addr_flush - Flush device address list * @dev: device @@ -509,11 +528,11 @@ EXPORT_SYMBOL(__hw_addr_init); void dev_addr_flush(struct net_device *dev) { /* rtnl_mutex must be held here */ + dev_addr_check(dev); __hw_addr_flush(&dev->dev_addrs); dev->dev_addr = NULL; } -EXPORT_SYMBOL(dev_addr_flush); /** * dev_addr_init - Init device address list @@ -547,7 +566,21 @@ int dev_addr_init(struct net_device *dev) } return err; } -EXPORT_SYMBOL(dev_addr_init); + +void dev_addr_mod(struct net_device *dev, unsigned int offset, + const void *addr, size_t len) +{ + struct netdev_hw_addr *ha; + + dev_addr_check(dev); + + ha = container_of(dev->dev_addr, struct netdev_hw_addr, addr[0]); + rb_erase(&ha->node, &dev->dev_addrs.tree); + memcpy(&ha->addr[offset], addr, len); + memcpy(&dev->dev_addr_shadow[offset], addr, len); + WARN_ON(__hw_addr_insert(&dev->dev_addrs, ha, dev->addr_len)); +} +EXPORT_SYMBOL(dev_addr_mod); /** * dev_addr_add - Add a device address diff --git a/net/core/dev_addr_lists_test.c b/net/core/dev_addr_lists_test.c new file mode 100644 index 000000000000..049cfbc58aa9 --- /dev/null +++ b/net/core/dev_addr_lists_test.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <kunit/test.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> + +static const struct net_device_ops dummy_netdev_ops = { +}; + +struct dev_addr_test_priv { + u32 addr_seen; +}; + +static int dev_addr_test_sync(struct net_device *netdev, const unsigned char *a) +{ + struct dev_addr_test_priv *datp = netdev_priv(netdev); + + if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN)) + datp->addr_seen |= 1 << a[0]; + return 0; +} + 
+static int dev_addr_test_unsync(struct net_device *netdev, + const unsigned char *a) +{ + struct dev_addr_test_priv *datp = netdev_priv(netdev); + + if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN)) + datp->addr_seen &= ~(1 << a[0]); + return 0; +} + +static int dev_addr_test_init(struct kunit *test) +{ + struct dev_addr_test_priv *datp; + struct net_device *netdev; + int err; + + netdev = alloc_etherdev(sizeof(*datp)); + KUNIT_ASSERT_TRUE(test, !!netdev); + + test->priv = netdev; + netdev->netdev_ops = &dummy_netdev_ops; + + err = register_netdev(netdev); + if (err) { + free_netdev(netdev); + KUNIT_FAIL(test, "Can't register netdev %d", err); + } + + rtnl_lock(); + return 0; +} + +static void dev_addr_test_exit(struct kunit *test) +{ + struct net_device *netdev = test->priv; + + rtnl_unlock(); + unregister_netdev(netdev); + free_netdev(netdev); +} + +static void dev_addr_test_basic(struct kunit *test) +{ + struct net_device *netdev = test->priv; + u8 addr[ETH_ALEN]; + + KUNIT_EXPECT_TRUE(test, !!netdev->dev_addr); + + memset(addr, 2, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + KUNIT_EXPECT_EQ(test, 0, memcmp(netdev->dev_addr, addr, sizeof(addr))); + + memset(addr, 3, sizeof(addr)); + dev_addr_set(netdev, addr); + KUNIT_EXPECT_EQ(test, 0, memcmp(netdev->dev_addr, addr, sizeof(addr))); +} + +static void dev_addr_test_sync_one(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct dev_addr_test_priv *datp; + u8 addr[ETH_ALEN]; + + datp = netdev_priv(netdev); + + memset(addr, 1, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 2, datp->addr_seen); + + memset(addr, 2, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + + datp->addr_seen = 0; + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + /* It's not going to sync anything because the main address is + * considered synced and we overwrite in place. 
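/*
 * Illustrative sketch (hypothetical driver helper, not part of this patch):
 * with dev->dev_addr kept in the address rbtree and checked against the
 * shadow copy by dev_addr_check(), drivers change the MAC through helpers
 * such as eth_hw_addr_set()/dev_addr_set(), as these tests do, never by
 * writing into dev->dev_addr directly.
 */
#include <linux/etherdevice.h>

static void example_set_mac(struct net_device *dev, const u8 *mac)
{
        /* old style, now caught by dev_addr_check():
         *      memcpy(dev->dev_addr, mac, ETH_ALEN);
         */
        eth_hw_addr_set(dev, mac);      /* updates the tree and the shadow copy */
}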
+ */ + KUNIT_EXPECT_EQ(test, 0, datp->addr_seen); +} + +static void dev_addr_test_add_del(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct dev_addr_test_priv *datp; + u8 addr[ETH_ALEN]; + int i; + + datp = netdev_priv(netdev); + + for (i = 1; i < 4; i++) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + } + /* Add 3 again */ + KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 0xf, datp->addr_seen); + + KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 0xf, datp->addr_seen); + + for (i = 1; i < 4; i++) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + } + + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 1, datp->addr_seen); +} + +static void dev_addr_test_del_main(struct kunit *test) +{ + struct net_device *netdev = test->priv; + u8 addr[ETH_ALEN]; + + memset(addr, 1, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + + KUNIT_EXPECT_EQ(test, -ENOENT, dev_addr_del(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + KUNIT_EXPECT_EQ(test, -ENOENT, dev_addr_del(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); +} + +static void dev_addr_test_add_set(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct dev_addr_test_priv *datp; + u8 addr[ETH_ALEN]; + int i; + + datp = netdev_priv(netdev); + + /* There is no external API like dev_addr_add_excl(), + * so shuffle the tree a little bit and exploit aliasing. 
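/*
 * Minimal KUnit skeleton (illustrative, mirroring the suite registered at the
 * end of this test file): each case receives a struct kunit *, uses the
 * KUNIT_EXPECT_*() assertions, and the cases are gathered into a suite
 * registered with kunit_test_suite().  All names here are made up.
 */
#include <kunit/test.h>

static void example_case(struct kunit *test)
{
        KUNIT_EXPECT_EQ(test, 4, 2 + 2);
}

static struct kunit_case example_cases[] = {
        KUNIT_CASE(example_case),
        {}
};

static struct kunit_suite example_suite = {
        .name = "example-suite",
        .test_cases = example_cases,
};
kunit_test_suite(example_suite);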
+ */ + for (i = 1; i < 16; i++) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + } + + memset(addr, i, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + memset(addr, 0, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 0xffff, datp->addr_seen); +} + +static void dev_addr_test_add_excl(struct kunit *test) +{ + struct net_device *netdev = test->priv; + u8 addr[ETH_ALEN]; + int i; + + for (i = 0; i < 10; i++) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_add_excl(netdev, addr)); + } + KUNIT_EXPECT_EQ(test, -EEXIST, dev_uc_add_excl(netdev, addr)); + + for (i = 0; i < 10; i += 2) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_del(netdev, addr)); + } + for (i = 1; i < 10; i += 2) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, -EEXIST, dev_uc_add_excl(netdev, addr)); + } +} + +static struct kunit_case dev_addr_test_cases[] = { + KUNIT_CASE(dev_addr_test_basic), + KUNIT_CASE(dev_addr_test_sync_one), + KUNIT_CASE(dev_addr_test_add_del), + KUNIT_CASE(dev_addr_test_del_main), + KUNIT_CASE(dev_addr_test_add_set), + KUNIT_CASE(dev_addr_test_add_excl), + {} +}; + +static struct kunit_suite dev_addr_test_suite = { + .name = "dev-addr-list-test", + .test_cases = dev_addr_test_cases, + .init = dev_addr_test_init, + .exit = dev_addr_test_exit, +}; +kunit_test_suite(dev_addr_test_suite); + +MODULE_LICENSE("GPL"); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index cbab5fec64b1..1b807d119da5 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -192,7 +192,7 @@ static int net_hwtstamp_validate(struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) /* reserved for future extensions */ + if (cfg.flags & ~HWTSTAMP_FLAG_MASK) return -EINVAL; tx_type = cfg.tx_type; @@ -313,6 +313,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, int err; struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); const struct net_device_ops *ops; + netdevice_tracker dev_tracker; if (!dev) return -ENODEV; @@ -381,10 +382,10 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, return -ENODEV; if (!netif_is_bridge_master(dev)) return -EOPNOTSUPP; - dev_hold(dev); + dev_hold_track(dev, &dev_tracker, GFP_KERNEL); rtnl_unlock(); err = br_ioctl_call(net, netdev_priv(dev), cmd, ifr, NULL); - dev_put(dev); + dev_put_track(dev, &dev_tracker); rtnl_lock(); return err; diff --git a/net/core/devlink.c b/net/core/devlink.c index c06c9ba6e8c5..6366ce324dce 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -69,6 +69,35 @@ struct devlink { char priv[] __aligned(NETDEV_ALIGN); }; +/** + * struct devlink_resource - devlink resource + * @name: name of the resource + * @id: id, per devlink instance + * @size: size of the resource + * @size_new: updated size of the resource, reload is needed + * @size_valid: valid in case the total size of the resource is valid + * including its children + * @parent: parent resource + * @size_params: size parameters + * @list: parent list + * @resource_list: list of child resources + * @occ_get: occupancy getter callback + * @occ_get_priv: occupancy getter callback priv + */ +struct devlink_resource { + const char *name; + u64 id; + u64 size; + u64 size_new; + 
bool size_valid; + struct devlink_resource *parent; + struct devlink_resource_size_params size_params; + struct list_head list; + struct list_head resource_list; + devlink_resource_occ_get_t *occ_get; + void *occ_get_priv; +}; + void *devlink_priv(struct devlink *devlink) { return &devlink->priv; @@ -4432,6 +4461,21 @@ static const struct devlink_param devlink_param_generic[] = { .name = DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE, }, + { + .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP, + .name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME, + .type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE, + }, + { + .id = DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE, + .name = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME, + .type = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE, + }, + { + .id = DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE, + .name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME, + .type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE, + }, }; static int devlink_param_generic_verify(const struct devlink_param *param) @@ -8840,8 +8884,6 @@ static const struct genl_small_ops devlink_nl_ops[] = { GENL_DONT_VALIDATE_DUMP_STRICT, .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | - DEVLINK_NL_FLAG_NO_LOCK, }, { .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR, @@ -9905,34 +9947,38 @@ out: } EXPORT_SYMBOL_GPL(devlink_resource_register); +static void devlink_resource_unregister(struct devlink *devlink, + struct devlink_resource *resource) +{ + struct devlink_resource *tmp, *child_resource; + + list_for_each_entry_safe(child_resource, tmp, &resource->resource_list, + list) { + devlink_resource_unregister(devlink, child_resource); + list_del(&child_resource->list); + kfree(child_resource); + } +} + /** * devlink_resources_unregister - free all resources * * @devlink: devlink - * @resource: resource */ -void devlink_resources_unregister(struct devlink *devlink, - struct devlink_resource *resource) +void devlink_resources_unregister(struct devlink *devlink) { struct devlink_resource *tmp, *child_resource; - struct list_head *resource_list; - if (resource) - resource_list = &resource->resource_list; - else - resource_list = &devlink->resource_list; - - if (!resource) - mutex_lock(&devlink->lock); + mutex_lock(&devlink->lock); - list_for_each_entry_safe(child_resource, tmp, resource_list, list) { - devlink_resources_unregister(devlink, child_resource); + list_for_each_entry_safe(child_resource, tmp, &devlink->resource_list, + list) { + devlink_resource_unregister(devlink, child_resource); list_del(&child_resource->list); kfree(child_resource); } - if (!resource) - mutex_unlock(&devlink->lock); + mutex_unlock(&devlink->lock); } EXPORT_SYMBOL_GPL(devlink_resources_unregister); diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 49442cae6f69..3d0ab2eec916 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -850,7 +850,7 @@ net_dm_hw_metadata_copy(const struct devlink_trap_metadata *metadata) } hw_metadata->input_dev = metadata->input_dev; - dev_hold(hw_metadata->input_dev); + dev_hold_track(hw_metadata->input_dev, &hw_metadata->dev_tracker, GFP_ATOMIC); return hw_metadata; @@ -864,9 +864,9 @@ free_hw_metadata: } static void -net_dm_hw_metadata_free(const struct devlink_trap_metadata *hw_metadata) +net_dm_hw_metadata_free(struct devlink_trap_metadata *hw_metadata) { - dev_put(hw_metadata->input_dev); + dev_put_track(hw_metadata->input_dev, &hw_metadata->dev_tracker); 
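/*
 * Illustrative sketch (hypothetical "struct foo", not part of this patch) of
 * the tracked-reference pattern the conversions above follow: a structure
 * holding a long-lived net_device pointer embeds a netdevice_tracker next to
 * it, takes the reference with dev_hold_track() and releases it with
 * dev_put_track(), so the refcount tracker can attribute a leaked reference
 * to its owner.
 */
#include <linux/netdevice.h>

struct foo {
        struct net_device *dev;
        netdevice_tracker dev_tracker;
};

static void foo_attach(struct foo *f, struct net_device *dev)
{
        f->dev = dev;
        dev_hold_track(dev, &f->dev_tracker, GFP_KERNEL);
}

static void foo_detach(struct foo *f)
{
        dev_put_track(f->dev, &f->dev_tracker);
        f->dev = NULL;
}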
kfree(hw_metadata->fa_cookie); kfree(hw_metadata->trap_name); kfree(hw_metadata->trap_group_name); diff --git a/net/core/dst.c b/net/core/dst.c index 497ef9b3fc6a..d16c2c9bfebd 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -49,7 +49,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops, unsigned short flags) { dst->dev = dev; - dev_hold(dev); + dev_hold_track(dev, &dst->dev_tracker, GFP_ATOMIC); dst->ops = ops; dst_init_metrics(dst, dst_default_metrics.metrics, true); dst->expires = 0UL; @@ -117,7 +117,7 @@ struct dst_entry *dst_destroy(struct dst_entry * dst) if (dst->ops->destroy) dst->ops->destroy(dst); - dev_put(dst->dev); + dev_put_track(dst->dev, &dst->dev_tracker); lwtstate_put(dst->lwtstate); @@ -159,8 +159,8 @@ void dst_dev_put(struct dst_entry *dst) dst->input = dst_discard; dst->output = dst_discard_out; dst->dev = blackhole_netdev; - dev_hold(dst->dev); - dev_put(dev); + dev_replace_track(dev, blackhole_netdev, &dst->dev_tracker, + GFP_ATOMIC); } EXPORT_SYMBOL(dst_dev_put); diff --git a/net/core/failover.c b/net/core/failover.c index b5cd3c727285..dcaa92a85ea2 100644 --- a/net/core/failover.c +++ b/net/core/failover.c @@ -252,7 +252,7 @@ struct failover *failover_register(struct net_device *dev, return ERR_PTR(-ENOMEM); rcu_assign_pointer(failover->ops, ops); - dev_hold(dev); + dev_hold_track(dev, &failover->dev_tracker, GFP_KERNEL); dev->priv_flags |= IFF_FAILOVER; rcu_assign_pointer(failover->failover_dev, dev); @@ -285,7 +285,7 @@ void failover_unregister(struct failover *failover) failover_dev->name); failover_dev->priv_flags &= ~IFF_FAILOVER; - dev_put(failover_dev); + dev_put_track(failover_dev, &failover->dev_tracker); spin_lock(&failover_lock); list_del(&failover->list); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 1bb567a3b329..75282222e0b4 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -750,6 +750,27 @@ static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, return 0; } +static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = { + [FRA_UNSPEC] = { .strict_start_type = FRA_DPORT_RANGE + 1 }, + [FRA_IIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, + [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, + [FRA_PRIORITY] = { .type = NLA_U32 }, + [FRA_FWMARK] = { .type = NLA_U32 }, + [FRA_FLOW] = { .type = NLA_U32 }, + [FRA_TUN_ID] = { .type = NLA_U64 }, + [FRA_FWMASK] = { .type = NLA_U32 }, + [FRA_TABLE] = { .type = NLA_U32 }, + [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, + [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, + [FRA_GOTO] = { .type = NLA_U32 }, + [FRA_L3MDEV] = { .type = NLA_U8 }, + [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) }, + [FRA_PROTOCOL] = { .type = NLA_U8 }, + [FRA_IP_PROTO] = { .type = NLA_U8 }, + [FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }, + [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) } +}; + int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { @@ -774,7 +795,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, } err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX, - ops->policy, extack); + fib_rule_policy, extack); if (err < 0) { NL_SET_ERR_MSG(extack, "Error parsing msg"); goto errout; @@ -882,7 +903,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, } err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX, - ops->policy, extack); + fib_rule_policy, extack); if (err < 0) { NL_SET_ERR_MSG(extack, "Error parsing 
msg"); goto errout; diff --git a/net/core/filter.c b/net/core/filter.c index 6102f093d59a..e4cc3aff5bf7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -301,7 +301,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, break; case SKF_AD_PKTTYPE: - *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET()); + *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX); #ifdef __BIG_ENDIAN_BITFIELD *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5); @@ -323,7 +323,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, offsetof(struct sk_buff, vlan_tci)); break; case SKF_AD_VLAN_TAG_PRESENT: - *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET()); + *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET); if (PKT_VLAN_PRESENT_BIT) *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT); if (PKT_VLAN_PRESENT_BIT < 7) @@ -1242,10 +1242,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) int err, new_len, old_len = fp->len; bool seen_ld_abs = false; - /* We are free to overwrite insns et al right here as it - * won't be used at this point in time anymore internally - * after the migration to the internal BPF instruction - * representation. + /* We are free to overwrite insns et al right here as it won't be used at + * this point in time anymore internally after the migration to the eBPF + * instruction representation. */ BUILD_BUG_ON(sizeof(struct sock_filter) != sizeof(struct bpf_insn)); @@ -1336,8 +1335,8 @@ static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp, */ bpf_jit_compile(fp); - /* JIT compiler couldn't process this filter, so do the - * internal BPF translation for the optimized interpreter. + /* JIT compiler couldn't process this filter, so do the eBPF translation + * for the optimized interpreter. */ if (!fp->jited) fp = bpf_migrate_filter(fp); @@ -8029,7 +8028,7 @@ static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, * (Fast-path, otherwise approximation that we might be * a clone, do the rest in helper.) 
*/ - *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET()); + *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); @@ -8617,7 +8616,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, pkt_type): *target_size = 1; *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, - PKT_TYPE_OFFSET()); + PKT_TYPE_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX); #ifdef __BIG_ENDIAN_BITFIELD *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5); @@ -8642,7 +8641,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, vlan_present): *target_size = 1; *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, - PKT_VLAN_PRESENT_OFFSET()); + PKT_VLAN_PRESENT_OFFSET); if (PKT_VLAN_PRESENT_BIT) *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT); if (PKT_VLAN_PRESENT_BIT < 7) @@ -10543,6 +10542,7 @@ static bool sk_lookup_is_valid_access(int off, int size, case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): case bpf_ctx_range(struct bpf_sk_lookup, remote_port): case bpf_ctx_range(struct bpf_sk_lookup, local_port): + case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex): bpf_ctx_record_field_size(info, sizeof(__u32)); return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); @@ -10632,6 +10632,12 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, bpf_target_off(struct bpf_sk_lookup_kern, dport, 2, target_size)); break; + + case offsetof(struct bpf_sk_lookup, ingress_ifindex): + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + ingress_ifindex, 4, target_size)); + break; } return insn - insn_buf; @@ -10656,14 +10662,10 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); } -#ifdef CONFIG_DEBUG_INFO_BTF -BTF_ID_LIST_GLOBAL(btf_sock_ids) +BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE) #define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type) BTF_SOCK_TYPE_xxx #undef BTF_SOCK_TYPE -#else -u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; -#endif BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) { diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 1b094c481f1d..fe9b5063cb73 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -1461,7 +1461,7 @@ out_bad: } EXPORT_SYMBOL(__skb_flow_dissect); -static siphash_key_t hashrnd __read_mostly; +static siphash_aligned_key_t hashrnd; static __always_inline void __flow_hash_secret_init(void) { net_get_random_once(&hashrnd, sizeof(hashrnd)); diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index 6beaea13564a..73f68d4625f3 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/kernel.h> #include <linux/slab.h> +#include <net/act_api.h> #include <net/flow_offload.h> #include <linux/rtnetlink.h> #include <linux/mutex.h> @@ -27,6 +28,26 @@ struct flow_rule *flow_rule_alloc(unsigned int num_actions) } EXPORT_SYMBOL(flow_rule_alloc); +struct flow_offload_action *offload_action_alloc(unsigned int num_actions) +{ + struct flow_offload_action *fl_action; + int i; + + fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions), + GFP_KERNEL); + if (!fl_action) + return NULL; + + fl_action->action.num_entries = 
num_actions; + /* Pre-fill each action hw_stats with DONT_CARE. + * Caller can override this if it wants stats for a given action. + */ + for (i = 0; i < num_actions; i++) + fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE; + + return fl_action; +} + #define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ const struct flow_match *__m = &(__rule)->match; \ struct flow_dissector *__d = (__m)->dissector; \ @@ -397,6 +418,8 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv) existing_qdiscs_register(cb, cb_priv); mutex_unlock(&flow_indr_block_lock); + tcf_action_reoffload_cb(cb, cb_priv, true); + return 0; } EXPORT_SYMBOL(flow_indr_dev_register); @@ -449,6 +472,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, __flow_block_indr_cleanup(release, cb_priv, &cleanup_list); mutex_unlock(&flow_indr_block_lock); + tcf_action_reoffload_cb(cb, cb_priv, false); flow_block_indr_notify(&cleanup_list); kfree(indr_dev); } @@ -549,19 +573,25 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, void (*cleanup)(struct flow_block_cb *block_cb)) { struct flow_indr_dev *this; + u32 count = 0; + int err; mutex_lock(&flow_indr_block_lock); + if (bo) { + if (bo->command == FLOW_BLOCK_BIND) + indir_dev_add(data, dev, sch, type, cleanup, bo); + else if (bo->command == FLOW_BLOCK_UNBIND) + indir_dev_remove(data); + } - if (bo->command == FLOW_BLOCK_BIND) - indir_dev_add(data, dev, sch, type, cleanup, bo); - else if (bo->command == FLOW_BLOCK_UNBIND) - indir_dev_remove(data); - - list_for_each_entry(this, &flow_block_indr_dev_list, list) - this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); + list_for_each_entry(this, &flow_block_indr_dev_list, list) { + err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); + if (!err) + count++; + } mutex_unlock(&flow_indr_block_lock); - return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0; + return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count; } EXPORT_SYMBOL(flow_indr_dev_setup_offload); diff --git a/net/core/gro.c b/net/core/gro.c new file mode 100644 index 000000000000..8ec8b44596da --- /dev/null +++ b/net/core/gro.c @@ -0,0 +1,766 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include <net/gro.h> +#include <net/dst_metadata.h> +#include <net/busy_poll.h> +#include <trace/events/net.h> + +#define MAX_GRO_SKBS 8 + +/* This should be increased if a protocol with a bigger head is added. */ +#define GRO_MAX_HEAD (MAX_HEADER + 128) + +static DEFINE_SPINLOCK(offload_lock); +static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base); +/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ +int gro_normal_batch __read_mostly = 8; + +/** + * dev_add_offload - register offload handlers + * @po: protocol offload declaration + * + * Add protocol offload handlers to the networking stack. The passed + * &proto_offload is linked into kernel lists and may not be freed until + * it has been removed from the kernel lists. + * + * This call does not sleep therefore it can not + * guarantee all CPU's that are in middle of receiving packets + * will see the new offload handlers (until the next received packet). 
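/*
 * Illustrative sketch (hypothetical protocol, all names made up): how a
 * protocol registers its GRO callbacks with the offload list that
 * dev_add_offload() below manages.  The stub receive callback never
 * aggregates; it only sets the flush flag so GRO hands the packet to the
 * stack unchanged.
 */
#include <linux/netdevice.h>
#include <net/gro.h>

static struct sk_buff *example_gro_receive(struct list_head *head,
                                           struct sk_buff *skb)
{
        NAPI_GRO_CB(skb)->flush = 1;    /* do not hold, pass up as-is */
        return NULL;                    /* nothing to flush from the list */
}

static int example_gro_complete(struct sk_buff *skb, int nhoff)
{
        return 0;
}

static struct packet_offload example_offload __read_mostly = {
        .type = cpu_to_be16(0x88b5),    /* a local experimental EtherType */
        .callbacks = {
                .gro_receive    = example_gro_receive,
                .gro_complete   = example_gro_complete,
        },
};

static void example_offload_register(void)
{
        dev_add_offload(&example_offload);
}

static void example_offload_unregister(void)
{
        dev_remove_offload(&example_offload);   /* waits for readers via synchronize_net() */
}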
+ */ +void dev_add_offload(struct packet_offload *po) +{ + struct packet_offload *elem; + + spin_lock(&offload_lock); + list_for_each_entry(elem, &offload_base, list) { + if (po->priority < elem->priority) + break; + } + list_add_rcu(&po->list, elem->list.prev); + spin_unlock(&offload_lock); +} +EXPORT_SYMBOL(dev_add_offload); + +/** + * __dev_remove_offload - remove offload handler + * @po: packet offload declaration + * + * Remove a protocol offload handler that was previously added to the + * kernel offload handlers by dev_add_offload(). The passed &offload_type + * is removed from the kernel lists and can be freed or reused once this + * function returns. + * + * The packet type might still be in use by receivers + * and must not be freed until after all the CPU's have gone + * through a quiescent state. + */ +static void __dev_remove_offload(struct packet_offload *po) +{ + struct list_head *head = &offload_base; + struct packet_offload *po1; + + spin_lock(&offload_lock); + + list_for_each_entry(po1, head, list) { + if (po == po1) { + list_del_rcu(&po->list); + goto out; + } + } + + pr_warn("dev_remove_offload: %p not found\n", po); +out: + spin_unlock(&offload_lock); +} + +/** + * dev_remove_offload - remove packet offload handler + * @po: packet offload declaration + * + * Remove a packet offload handler that was previously added to the kernel + * offload handlers by dev_add_offload(). The passed &offload_type is + * removed from the kernel lists and can be freed or reused once this + * function returns. + * + * This call sleeps to guarantee that no CPU is looking at the packet + * type after return. + */ +void dev_remove_offload(struct packet_offload *po) +{ + __dev_remove_offload(po); + + synchronize_net(); +} +EXPORT_SYMBOL(dev_remove_offload); + +/** + * skb_mac_gso_segment - mac layer segmentation handler. 
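/*
 * Illustrative sketch (error handling trimmed; assumes skb->data is at the
 * MAC header and skb->mac_len is set, as the core stack arranges before
 * calling in): a user of the segmentation entry point below receives the
 * segments as a list linked through skb->next and consumes them one by one.
 * The kfree_skb() stands in for real per-segment transmission.
 */
#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/gro.h>

static void example_segment(struct sk_buff *skb, netdev_features_t features)
{
        struct sk_buff *segs, *seg, *next;

        segs = skb_mac_gso_segment(skb, features);
        if (IS_ERR_OR_NULL(segs))
                return;

        consume_skb(skb);       /* the original is now covered by the segments */
        skb_list_walk_safe(segs, seg, next)
                kfree_skb(seg); /* stand-in for handing each segment to TX */
}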
+ * @skb: buffer to segment + * @features: features for the output path (see dev->features) + */ +struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); + struct packet_offload *ptype; + int vlan_depth = skb->mac_len; + __be16 type = skb_network_protocol(skb, &vlan_depth); + + if (unlikely(!type)) + return ERR_PTR(-EINVAL); + + __skb_pull(skb, vlan_depth); + + rcu_read_lock(); + list_for_each_entry_rcu(ptype, &offload_base, list) { + if (ptype->type == type && ptype->callbacks.gso_segment) { + segs = ptype->callbacks.gso_segment(skb, features); + break; + } + } + rcu_read_unlock(); + + __skb_push(skb, skb->data - skb_mac_header(skb)); + + return segs; +} +EXPORT_SYMBOL(skb_mac_gso_segment); + +int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) +{ + struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); + unsigned int offset = skb_gro_offset(skb); + unsigned int headlen = skb_headlen(skb); + unsigned int len = skb_gro_len(skb); + unsigned int delta_truesize; + unsigned int new_truesize; + struct sk_buff *lp; + + if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) + return -E2BIG; + + lp = NAPI_GRO_CB(p)->last; + pinfo = skb_shinfo(lp); + + if (headlen <= offset) { + skb_frag_t *frag; + skb_frag_t *frag2; + int i = skbinfo->nr_frags; + int nr_frags = pinfo->nr_frags + i; + + if (nr_frags > MAX_SKB_FRAGS) + goto merge; + + offset -= headlen; + pinfo->nr_frags = nr_frags; + skbinfo->nr_frags = 0; + + frag = pinfo->frags + nr_frags; + frag2 = skbinfo->frags + i; + do { + *--frag = *--frag2; + } while (--i); + + skb_frag_off_add(frag, offset); + skb_frag_size_sub(frag, offset); + + /* all fragments truesize : remove (head size + sk_buff) */ + new_truesize = SKB_TRUESIZE(skb_end_offset(skb)); + delta_truesize = skb->truesize - new_truesize; + + skb->truesize = new_truesize; + skb->len -= skb->data_len; + skb->data_len = 0; + + NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; + goto done; + } else if (skb->head_frag) { + int nr_frags = pinfo->nr_frags; + skb_frag_t *frag = pinfo->frags + nr_frags; + struct page *page = virt_to_head_page(skb->head); + unsigned int first_size = headlen - offset; + unsigned int first_offset; + + if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) + goto merge; + + first_offset = skb->data - + (unsigned char *)page_address(page) + + offset; + + pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; + + __skb_frag_set_page(frag, page); + skb_frag_off_set(frag, first_offset); + skb_frag_size_set(frag, first_size); + + memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); + /* We dont need to clear skbinfo->nr_frags here */ + + new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff)); + delta_truesize = skb->truesize - new_truesize; + skb->truesize = new_truesize; + NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; + goto done; + } + +merge: + /* sk owenrship - if any - completely transferred to the aggregated packet */ + skb->destructor = NULL; + delta_truesize = skb->truesize; + if (offset > headlen) { + unsigned int eat = offset - headlen; + + skb_frag_off_add(&skbinfo->frags[0], eat); + skb_frag_size_sub(&skbinfo->frags[0], eat); + skb->data_len -= eat; + skb->len -= eat; + offset = headlen; + } + + __skb_pull(skb, offset); + + if (NAPI_GRO_CB(p)->last == p) + skb_shinfo(p)->frag_list = skb; + else + NAPI_GRO_CB(p)->last->next = skb; + NAPI_GRO_CB(p)->last = skb; + __skb_header_release(skb); + lp = p; + +done: + NAPI_GRO_CB(p)->count++; + p->data_len += 
len; + p->truesize += delta_truesize; + p->len += len; + if (lp != p) { + lp->data_len += len; + lp->truesize += delta_truesize; + lp->len += len; + } + NAPI_GRO_CB(skb)->same_flow = 1; + return 0; +} + + +static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) +{ + struct packet_offload *ptype; + __be16 type = skb->protocol; + struct list_head *head = &offload_base; + int err = -ENOENT; + + BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); + + if (NAPI_GRO_CB(skb)->count == 1) { + skb_shinfo(skb)->gso_size = 0; + goto out; + } + + rcu_read_lock(); + list_for_each_entry_rcu(ptype, head, list) { + if (ptype->type != type || !ptype->callbacks.gro_complete) + continue; + + err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, + ipv6_gro_complete, inet_gro_complete, + skb, 0); + break; + } + rcu_read_unlock(); + + if (err) { + WARN_ON(&ptype->list == head); + kfree_skb(skb); + return; + } + +out: + gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); +} + +static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, + bool flush_old) +{ + struct list_head *head = &napi->gro_hash[index].list; + struct sk_buff *skb, *p; + + list_for_each_entry_safe_reverse(skb, p, head, list) { + if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) + return; + skb_list_del_init(skb); + napi_gro_complete(napi, skb); + napi->gro_hash[index].count--; + } + + if (!napi->gro_hash[index].count) + __clear_bit(index, &napi->gro_bitmask); +} + +/* napi->gro_hash[].list contains packets ordered by age. + * youngest packets at the head of it. + * Complete skbs in reverse order to reduce latencies. + */ +void napi_gro_flush(struct napi_struct *napi, bool flush_old) +{ + unsigned long bitmask = napi->gro_bitmask; + unsigned int i, base = ~0U; + + while ((i = ffs(bitmask)) != 0) { + bitmask >>= i; + base += i; + __napi_gro_flush_chain(napi, base, flush_old); + } +} +EXPORT_SYMBOL(napi_gro_flush); + +static void gro_list_prepare(const struct list_head *head, + const struct sk_buff *skb) +{ + unsigned int maclen = skb->dev->hard_header_len; + u32 hash = skb_get_hash_raw(skb); + struct sk_buff *p; + + list_for_each_entry(p, head, list) { + unsigned long diffs; + + NAPI_GRO_CB(p)->flush = 0; + + if (hash != skb_get_hash_raw(p)) { + NAPI_GRO_CB(p)->same_flow = 0; + continue; + } + + diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; + diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb); + if (skb_vlan_tag_present(p)) + diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb); + diffs |= skb_metadata_differs(p, skb); + if (maclen == ETH_HLEN) + diffs |= compare_ether_header(skb_mac_header(p), + skb_mac_header(skb)); + else if (!diffs) + diffs = memcmp(skb_mac_header(p), + skb_mac_header(skb), + maclen); + + /* in most common scenarions 'slow_gro' is 0 + * otherwise we are already on some slower paths + * either skip all the infrequent tests altogether or + * avoid trying too hard to skip each of them individually + */ + if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) { +#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + struct tc_skb_ext *skb_ext; + struct tc_skb_ext *p_ext; +#endif + + diffs |= p->sk != skb->sk; + diffs |= skb_metadata_dst_cmp(p, skb); + diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb); + +#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + skb_ext = skb_ext_find(skb, TC_SKB_EXT); + p_ext = skb_ext_find(p, TC_SKB_EXT); + + diffs |= (!!p_ext) ^ (!!skb_ext); + if (!diffs && unlikely(skb_ext)) + diffs |= 
p_ext->chain ^ skb_ext->chain; +#endif + } + + NAPI_GRO_CB(p)->same_flow = !diffs; + } +} + +static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff) +{ + const struct skb_shared_info *pinfo = skb_shinfo(skb); + const skb_frag_t *frag0 = &pinfo->frags[0]; + + NAPI_GRO_CB(skb)->data_offset = 0; + NAPI_GRO_CB(skb)->frag0 = NULL; + NAPI_GRO_CB(skb)->frag0_len = 0; + + if (!skb_headlen(skb) && pinfo->nr_frags && + !PageHighMem(skb_frag_page(frag0)) && + (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) { + NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); + NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, + skb_frag_size(frag0), + skb->end - skb->tail); + } +} + +static void gro_pull_from_frag0(struct sk_buff *skb, int grow) +{ + struct skb_shared_info *pinfo = skb_shinfo(skb); + + BUG_ON(skb->end - skb->tail < grow); + + memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); + + skb->data_len -= grow; + skb->tail += grow; + + skb_frag_off_add(&pinfo->frags[0], grow); + skb_frag_size_sub(&pinfo->frags[0], grow); + + if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { + skb_frag_unref(skb, 0); + memmove(pinfo->frags, pinfo->frags + 1, + --pinfo->nr_frags * sizeof(pinfo->frags[0])); + } +} + +static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) +{ + struct sk_buff *oldest; + + oldest = list_last_entry(head, struct sk_buff, list); + + /* We are called with head length >= MAX_GRO_SKBS, so this is + * impossible. + */ + if (WARN_ON_ONCE(!oldest)) + return; + + /* Do not adjust napi->gro_hash[].count, caller is adding a new + * SKB to the chain. + */ + skb_list_del_init(oldest); + napi_gro_complete(napi, oldest); +} + +static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) +{ + u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); + struct gro_list *gro_list = &napi->gro_hash[bucket]; + struct list_head *head = &offload_base; + struct packet_offload *ptype; + __be16 type = skb->protocol; + struct sk_buff *pp = NULL; + enum gro_result ret; + int same_flow; + int grow; + + if (netif_elide_gro(skb->dev)) + goto normal; + + gro_list_prepare(&gro_list->list, skb); + + rcu_read_lock(); + list_for_each_entry_rcu(ptype, head, list) { + if (ptype->type != type || !ptype->callbacks.gro_receive) + continue; + + skb_set_network_header(skb, skb_gro_offset(skb)); + skb_reset_mac_len(skb); + NAPI_GRO_CB(skb)->same_flow = 0; + NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb); + NAPI_GRO_CB(skb)->free = 0; + NAPI_GRO_CB(skb)->encap_mark = 0; + NAPI_GRO_CB(skb)->recursion_counter = 0; + NAPI_GRO_CB(skb)->is_fou = 0; + NAPI_GRO_CB(skb)->is_atomic = 1; + NAPI_GRO_CB(skb)->gro_remcsum_start = 0; + + /* Setup for GRO checksum validation */ + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + NAPI_GRO_CB(skb)->csum = skb->csum; + NAPI_GRO_CB(skb)->csum_valid = 1; + NAPI_GRO_CB(skb)->csum_cnt = 0; + break; + case CHECKSUM_UNNECESSARY: + NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; + NAPI_GRO_CB(skb)->csum_valid = 0; + break; + default: + NAPI_GRO_CB(skb)->csum_cnt = 0; + NAPI_GRO_CB(skb)->csum_valid = 0; + } + + pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, + ipv6_gro_receive, inet_gro_receive, + &gro_list->list, skb); + break; + } + rcu_read_unlock(); + + if (&ptype->list == head) + goto normal; + + if (PTR_ERR(pp) == -EINPROGRESS) { + ret = GRO_CONSUMED; + goto ok; + } + + same_flow = NAPI_GRO_CB(skb)->same_flow; + ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; + + if (pp) { + skb_list_del_init(pp); + napi_gro_complete(napi, pp); + gro_list->count--; + } + + if (same_flow) + goto ok; + + if (NAPI_GRO_CB(skb)->flush) + goto normal; + + if (unlikely(gro_list->count >= MAX_GRO_SKBS)) + gro_flush_oldest(napi, &gro_list->list); + else + gro_list->count++; + + NAPI_GRO_CB(skb)->count = 1; + NAPI_GRO_CB(skb)->age = jiffies; + NAPI_GRO_CB(skb)->last = skb; + skb_shinfo(skb)->gso_size = skb_gro_len(skb); + list_add(&skb->list, &gro_list->list); + ret = GRO_HELD; + +pull: + grow = skb_gro_offset(skb) - skb_headlen(skb); + if (grow > 0) + gro_pull_from_frag0(skb, grow); +ok: + if (gro_list->count) { + if (!test_bit(bucket, &napi->gro_bitmask)) + __set_bit(bucket, &napi->gro_bitmask); + } else if (test_bit(bucket, &napi->gro_bitmask)) { + __clear_bit(bucket, &napi->gro_bitmask); + } + + return ret; + +normal: + ret = GRO_NORMAL; + goto pull; +} + +struct packet_offload *gro_find_receive_by_type(__be16 type) +{ + struct list_head *offload_head = &offload_base; + struct packet_offload *ptype; + + list_for_each_entry_rcu(ptype, offload_head, list) { + if (ptype->type != type || !ptype->callbacks.gro_receive) + continue; + return ptype; + } + return NULL; +} +EXPORT_SYMBOL(gro_find_receive_by_type); + +struct packet_offload *gro_find_complete_by_type(__be16 type) +{ + struct list_head *offload_head = &offload_base; + struct packet_offload *ptype; + + list_for_each_entry_rcu(ptype, offload_head, list) { + if (ptype->type != type || !ptype->callbacks.gro_complete) + continue; + return ptype; + } + return NULL; +} +EXPORT_SYMBOL(gro_find_complete_by_type); + +static gro_result_t napi_skb_finish(struct napi_struct *napi, + struct sk_buff *skb, + gro_result_t ret) +{ + switch (ret) { + case GRO_NORMAL: + gro_normal_one(napi, skb, 1); + break; + + case GRO_MERGED_FREE: + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) + napi_skb_free_stolen_head(skb); + else if (skb->fclone != SKB_FCLONE_UNAVAILABLE) + __kfree_skb(skb); + else + __kfree_skb_defer(skb); + break; + + case GRO_HELD: + case GRO_MERGED: + case GRO_CONSUMED: + break; + } + + return ret; +} + +gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) +{ + gro_result_t ret; + + skb_mark_napi_id(skb, napi); + trace_napi_gro_receive_entry(skb); + + skb_gro_reset_offset(skb, 0); + + ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb)); + trace_napi_gro_receive_exit(ret); + + return ret; +} +EXPORT_SYMBOL(napi_gro_receive); + +static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) +{ + if (unlikely(skb->pfmemalloc)) { + consume_skb(skb); + return; + } + __skb_pull(skb, skb_headlen(skb)); + /* restore the reserve we had after netdev_alloc_skb_ip_align() */ + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); + __vlan_hwaccel_clear_tag(skb); + skb->dev = napi->dev; + skb->skb_iif = 0; + + /* eth_type_trans() assumes pkt_type is PACKET_HOST */ + skb->pkt_type = PACKET_HOST; + + skb->encapsulation = 0; + skb_shinfo(skb)->gso_type = 0; + skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); + if (unlikely(skb->slow_gro)) { + skb_orphan(skb); + skb_ext_reset(skb); + nf_reset_ct(skb); + skb->slow_gro = 0; + } + + napi->skb = skb; +} + +struct sk_buff *napi_get_frags(struct napi_struct *napi) +{ + struct sk_buff *skb = napi->skb; + + if (!skb) { + skb = napi_alloc_skb(napi, GRO_MAX_HEAD); + if (skb) { + napi->skb = skb; + skb_mark_napi_id(skb, napi); + } + } + return skb; +} +EXPORT_SYMBOL(napi_get_frags); + +static 
gro_result_t napi_frags_finish(struct napi_struct *napi, + struct sk_buff *skb, + gro_result_t ret) +{ + switch (ret) { + case GRO_NORMAL: + case GRO_HELD: + __skb_push(skb, ETH_HLEN); + skb->protocol = eth_type_trans(skb, skb->dev); + if (ret == GRO_NORMAL) + gro_normal_one(napi, skb, 1); + break; + + case GRO_MERGED_FREE: + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) + napi_skb_free_stolen_head(skb); + else + napi_reuse_skb(napi, skb); + break; + + case GRO_MERGED: + case GRO_CONSUMED: + break; + } + + return ret; +} + +/* Upper GRO stack assumes network header starts at gro_offset=0 + * Drivers could call both napi_gro_frags() and napi_gro_receive() + * We copy ethernet header into skb->data to have a common layout. + */ +static struct sk_buff *napi_frags_skb(struct napi_struct *napi) +{ + struct sk_buff *skb = napi->skb; + const struct ethhdr *eth; + unsigned int hlen = sizeof(*eth); + + napi->skb = NULL; + + skb_reset_mac_header(skb); + skb_gro_reset_offset(skb, hlen); + + if (unlikely(skb_gro_header_hard(skb, hlen))) { + eth = skb_gro_header_slow(skb, hlen, 0); + if (unlikely(!eth)) { + net_warn_ratelimited("%s: dropping impossible skb from %s\n", + __func__, napi->dev->name); + napi_reuse_skb(napi, skb); + return NULL; + } + } else { + eth = (const struct ethhdr *)skb->data; + gro_pull_from_frag0(skb, hlen); + NAPI_GRO_CB(skb)->frag0 += hlen; + NAPI_GRO_CB(skb)->frag0_len -= hlen; + } + __skb_pull(skb, hlen); + + /* + * This works because the only protocols we care about don't require + * special handling. + * We'll fix it up properly in napi_frags_finish() + */ + skb->protocol = eth->h_proto; + + return skb; +} + +gro_result_t napi_gro_frags(struct napi_struct *napi) +{ + gro_result_t ret; + struct sk_buff *skb = napi_frags_skb(napi); + + trace_napi_gro_frags_entry(skb); + + ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); + trace_napi_gro_frags_exit(ret); + + return ret; +} +EXPORT_SYMBOL(napi_gro_frags); + +/* Compute the checksum from gro_offset and return the folded value + * after adding in any pseudo checksum. + */ +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb) +{ + __wsum wsum; + __sum16 sum; + + wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); + + /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ + sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); + /* See comments in __skb_checksum_complete(). 
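
Note: the GRO hunks in this region add dev_gro_receive(), napi_gro_receive(), napi_gro_frags() and the checksum helpers as a self-contained unit (apparently the new net/core/gro.c, judging by the matching removals from net/core/skbuff.c further down), without changing how drivers consume the API. For orientation, a minimal, hypothetical NAPI poll loop feeding packets into GRO is sketched below; struct my_rx_ring and my_ring_get_skb() are invented placeholders, while napi_gro_receive(), eth_type_trans() and napi_complete_done() are the real entry points.

        static int my_napi_poll(struct napi_struct *napi, int budget)
        {
                struct my_rx_ring *ring = container_of(napi, struct my_rx_ring, napi);
                int work_done = 0;

                while (work_done < budget) {
                        struct sk_buff *skb = my_ring_get_skb(ring);   /* driver RX logic */

                        if (!skb)
                                break;

                        skb->protocol = eth_type_trans(skb, ring->netdev);
                        napi_gro_receive(napi, skb);    /* hand the frame to GRO */
                        work_done++;
                }

                if (work_done < budget)
                        napi_complete_done(napi, work_done);

                return work_done;
        }
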
*/ + if (likely(!sum)) { + if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && + !skb->csum_complete_sw) + netdev_rx_csum_fault(skb->dev, skb); + } + + NAPI_GRO_CB(skb)->csum = wsum; + NAPI_GRO_CB(skb)->csum_valid = 1; + + return sum; +} +EXPORT_SYMBOL(__skb_gro_checksum_complete); diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 1a455847da54..b0f5344d1185 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -55,7 +55,7 @@ static void rfc2863_policy(struct net_device *dev) if (operstate == dev->operstate) return; - write_lock_bh(&dev_base_lock); + write_lock(&dev_base_lock); switch(dev->link_mode) { case IF_LINK_MODE_TESTING: @@ -74,7 +74,7 @@ static void rfc2863_policy(struct net_device *dev) dev->operstate = operstate; - write_unlock_bh(&dev_base_lock); + write_unlock(&dev_base_lock); } @@ -109,7 +109,7 @@ static void linkwatch_add_event(struct net_device *dev) spin_lock_irqsave(&lweventlist_lock, flags); if (list_empty(&dev->link_watch_list)) { list_add_tail(&dev->link_watch_list, &lweventlist); - dev_hold(dev); + dev_hold_track(dev, &dev->linkwatch_dev_tracker, GFP_ATOMIC); } spin_unlock_irqrestore(&lweventlist_lock, flags); } @@ -166,6 +166,9 @@ static void linkwatch_do_dev(struct net_device *dev) netdev_state_change(dev); } + /* Note: our callers are responsible for + * calling netdev_tracker_free(). + */ dev_put(dev); } @@ -209,6 +212,10 @@ static void __linkwatch_run_queue(int urgent_only) list_add_tail(&dev->link_watch_list, &lweventlist); continue; } + /* We must free netdev tracker under + * the spinlock protection. + */ + netdev_tracker_free(dev, &dev->linkwatch_dev_tracker); spin_unlock_irq(&lweventlist_lock); linkwatch_do_dev(dev); do_dev--; @@ -232,6 +239,10 @@ void linkwatch_forget_dev(struct net_device *dev) if (!list_empty(&dev->link_watch_list)) { list_del_init(&dev->link_watch_list); clean = 1; + /* We must release netdev tracker under + * the spinlock protection. + */ + netdev_tracker_free(dev, &dev->linkwatch_dev_tracker); } spin_unlock_irqrestore(&lweventlist_lock, flags); if (clean) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index dda12fbd177b..213cb7b26b7a 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -624,7 +624,7 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey, memcpy(n->primary_key, pkey, key_len); n->dev = dev; - dev_hold(dev); + dev_hold_track(dev, &n->dev_tracker, GFP_ATOMIC); /* Protocol specific setup. 
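
Note: the dev_hold_track()/dev_put_track() conversions in link_watch.c and neighbour.c (and in most files below) follow one pattern: the structure holding the device reference grows a netdevice_tracker, and each hold/put pair is annotated so the refcount-tracking debug option can report the exact owner of a leaked reference. A minimal sketch of that pattern, with struct my_binding and my_bind()/my_unbind() as hypothetical names:

        #include <linux/netdevice.h>

        struct my_binding {
                struct net_device *dev;
                netdevice_tracker dev_tracker;  /* cheap no-op unless tracking is enabled */
        };

        static void my_bind(struct my_binding *b, struct net_device *dev)
        {
                b->dev = dev;
                /* tracked equivalent of a bare dev_hold() */
                dev_hold_track(dev, &b->dev_tracker, GFP_KERNEL);
        }

        static void my_unbind(struct my_binding *b)
        {
                /* release through the same tracker that took the reference */
                dev_put_track(b->dev, &b->dev_tracker);
                b->dev = NULL;
        }

The link_watch.c hunks also show the split form of the same idea: netdev_tracker_free() under the list spinlock, with the plain dev_put() deferred to linkwatch_do_dev().
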
*/ if (tbl->constructor && (error = tbl->constructor(n)) < 0) { @@ -770,10 +770,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, write_pnet(&n->net, net); memcpy(n->key, pkey, key_len); n->dev = dev; - dev_hold(dev); + dev_hold_track(dev, &n->dev_tracker, GFP_KERNEL); if (tbl->pconstructor && tbl->pconstructor(n)) { - dev_put(dev); + dev_put_track(dev, &n->dev_tracker); kfree(n); n = NULL; goto out; @@ -805,7 +805,7 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, write_unlock_bh(&tbl->lock); if (tbl->pdestructor) tbl->pdestructor(n); - dev_put(n->dev); + dev_put_track(n->dev, &n->dev_tracker); kfree(n); return 0; } @@ -838,7 +838,7 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, n->next = NULL; if (tbl->pdestructor) tbl->pdestructor(n); - dev_put(n->dev); + dev_put_track(n->dev, &n->dev_tracker); kfree(n); } return -ENOENT; @@ -879,7 +879,7 @@ void neigh_destroy(struct neighbour *neigh) if (dev->netdev_ops->ndo_neigh_destroy) dev->netdev_ops->ndo_neigh_destroy(dev, neigh); - dev_put(dev); + dev_put_track(dev, &neigh->dev_tracker); neigh_parms_put(neigh->parms); neigh_dbg(2, "neigh %p is destroyed\n", neigh); @@ -1665,13 +1665,13 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, refcount_set(&p->refcnt, 1); p->reachable_time = neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); - dev_hold(dev); + dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL); p->dev = dev; write_pnet(&p->net, net); p->sysctl_table = NULL; if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { - dev_put(dev); + dev_put_track(dev, &p->dev_tracker); kfree(p); return NULL; } @@ -1702,7 +1702,7 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) list_del(&parms->list); parms->dead = 1; write_unlock_bh(&tbl->lock); - dev_put(parms->dev); + dev_put_track(parms->dev, &parms->dev_tracker); call_rcu(&parms->rcu_head, neigh_rcu_free_parms); } EXPORT_SYMBOL(neigh_parms_release); @@ -3770,10 +3770,6 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, neigh_proc_base_reachable_time; } - /* Don't export sysctls to unprivileged users */ - if (neigh_parms_net(p)->user_ns != &init_user_ns) - t->neigh_vars[0].procname = NULL; - switch (neigh_parms_family(p)) { case AF_INET: p_name = "ipv4"; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 9c01c642cf9e..53ea262ecafd 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -488,14 +488,6 @@ static ssize_t proto_down_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - struct net_device *netdev = to_net_dev(dev); - - /* The check is also done in change_proto_down; this helps returning - * early without hitting the trylock/restart in netdev_store. 
- */ - if (!netdev->netdev_ops->ndo_change_proto_down) - return -EOPNOTSUPP; - return netdev_store(dev, attr, buf, len, change_proto_down); } NETDEVICE_SHOW_RW(proto_down, fmt_dec); @@ -1012,7 +1004,7 @@ static void rx_queue_release(struct kobject *kobj) #endif memset(kobj, 0, sizeof(*kobj)); - dev_put(queue->dev); + dev_put_track(queue->dev, &queue->dev_tracker); } static const void *rx_queue_namespace(struct kobject *kobj) @@ -1052,7 +1044,7 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) /* Kobject_put later will trigger rx_queue_release call which * decreases dev refcount: Take that reference here */ - dev_hold(queue->dev); + dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL); kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, @@ -1201,11 +1193,7 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = { static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) { - unsigned long trans_timeout; - - spin_lock_irq(&queue->_xmit_lock); - trans_timeout = queue->trans_timeout; - spin_unlock_irq(&queue->_xmit_lock); + unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout); return sprintf(buf, fmt_ulong, trans_timeout); } @@ -1452,7 +1440,7 @@ static ssize_t xps_queue_show(struct net_device *dev, unsigned int index, for (i = map->len; i--;) { if (map->queues[i] == index) { - set_bit(j, mask); + __set_bit(j, mask); break; } } @@ -1619,7 +1607,7 @@ static void netdev_queue_release(struct kobject *kobj) struct netdev_queue *queue = to_netdev_queue(kobj); memset(kobj, 0, sizeof(*kobj)); - dev_put(queue->dev); + dev_put_track(queue->dev, &queue->dev_tracker); } static const void *netdev_queue_namespace(struct kobject *kobj) @@ -1659,7 +1647,7 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) /* Kobject_put later will trigger netdev_queue_release call * which decreases dev refcount: Take that reference here */ - dev_hold(queue->dev); + dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL); kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, @@ -1706,6 +1694,13 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) int i; int error = 0; + /* Tx queue kobjects are allowed to be updated when a device is being + * unregistered, but solely to remove queues from qdiscs. Any path + * adding queues should be fixed. 
+ */ + WARN(dev->reg_state == NETREG_UNREGISTERING && new_num > old_num, + "New queues can't be registered after device unregistration."); + for (i = old_num; i < new_num; i++) { error = netdev_queue_add_kobject(dev, i); if (error) { @@ -1820,6 +1815,9 @@ static void remove_queue_kobjects(struct net_device *dev) net_rx_queue_update_kobjects(dev, real_rx, 0); netdev_queue_update_kobjects(dev, real_tx, 0); + + dev->real_num_rx_queues = 0; + dev->real_num_tx_queues = 0; #ifdef CONFIG_SYSFS kset_unregister(dev->queues_kset); #endif diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 202fa5eacd0f..9b7171c40434 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -311,6 +311,8 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) LIST_HEAD(net_exit_list); refcount_set(&net->ns.count, 1); + ref_tracker_dir_init(&net->refcnt_tracker, 128); + refcount_set(&net->passive, 1); get_random_bytes(&net->hash_mix, sizeof(u32)); preempt_disable(); @@ -635,6 +637,7 @@ static DECLARE_WORK(net_cleanup_work, cleanup_net); void __put_net(struct net *net) { + ref_tracker_dir_exit(&net->refcnt_tracker); /* Cleanup the network namespace in process context */ if (llist_add(&net->cleanup_list, &cleanup_list)) queue_work(netns_wq, &net_cleanup_work); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index edfc0f8011f8..db724463e7cd 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -776,7 +776,7 @@ put_noaddr: err = __netpoll_setup(np, ndev); if (err) goto put; - + netdev_tracker_alloc(ndev, &np->dev_tracker, GFP_KERNEL); rtnl_unlock(); return 0; @@ -853,7 +853,7 @@ void netpoll_cleanup(struct netpoll *np) if (!np->dev) goto out; __netpoll_cleanup(np); - dev_put(np->dev); + dev_put_track(np->dev, &np->dev_tracker); np->dev = NULL; out: rtnl_unlock(); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index a3d74e2704c4..560a5e712dc3 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -410,6 +410,7 @@ struct pktgen_dev { * device name (not when the inject is * started as it used to do.) 
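
Note: setup_net()/__put_net() now initialise and drain a ref_tracker_dir for each namespace, and netpoll/pktgen attach trackers to their device references with netdev_tracker_alloc(). The socket hunks further down use the matching get_net_track()/put_net_track() helpers for namespace references. A small sketch of the namespace variant, with struct my_ctx as a hypothetical holder:

        #include <net/net_namespace.h>

        struct my_ctx {
                struct net *net;
                netns_tracker ns_tracker;       /* paired with the get/put below */
        };

        static void my_ctx_init(struct my_ctx *ctx, struct net *net)
        {
                /* like get_net(), but records this reference in net->refcnt_tracker */
                ctx->net = get_net_track(net, &ctx->ns_tracker, GFP_KERNEL);
        }

        static void my_ctx_release(struct my_ctx *ctx)
        {
                put_net_track(ctx->net, &ctx->ns_tracker);
                ctx->net = NULL;
        }
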
*/ + netdevice_tracker dev_tracker; char odevname[32]; struct flow_state *flows; unsigned int cflows; /* Concurrent flows (config) */ @@ -2099,7 +2100,7 @@ static int pktgen_setup_dev(const struct pktgen_net *pn, /* Clean old setups */ if (pkt_dev->odev) { - dev_put(pkt_dev->odev); + dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker); pkt_dev->odev = NULL; } @@ -2117,6 +2118,7 @@ static int pktgen_setup_dev(const struct pktgen_net *pn, err = -ENETDOWN; } else { pkt_dev->odev = odev; + netdev_tracker_alloc(odev, &pkt_dev->dev_tracker, GFP_KERNEL); return 0; } @@ -3805,7 +3807,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) return add_dev_to_thread(t, pkt_dev); out2: - dev_put(pkt_dev->odev); + dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker); out1: #ifdef CONFIG_XFRM free_SAs(pkt_dev); @@ -3899,7 +3901,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, /* Dis-associate from the interface */ if (pkt_dev->odev) { - dev_put(pkt_dev->odev); + dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker); pkt_dev->odev = NULL; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2af8aeeadadf..d6eba554b137 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -842,9 +842,9 @@ static void set_operstate(struct net_device *dev, unsigned char transition) } if (dev->operstate != operstate) { - write_lock_bh(&dev_base_lock); + write_lock(&dev_base_lock); dev->operstate = operstate; - write_unlock_bh(&dev_base_lock); + write_unlock(&dev_base_lock); netdev_state_change(dev); } } @@ -2539,13 +2539,12 @@ static int do_set_proto_down(struct net_device *dev, struct netlink_ext_ack *extack) { struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; - const struct net_device_ops *ops = dev->netdev_ops; unsigned long mask = 0; u32 value; bool proto_down; int err; - if (!ops->ndo_change_proto_down) { + if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) { NL_SET_ERR_MSG(extack, "Protodown not supported by device"); return -EOPNOTSUPP; } @@ -2768,7 +2767,7 @@ static int do_setlink(const struct sk_buff *skb, } if (dev->gso_max_segs ^ max_segs) { - dev->gso_max_segs = max_segs; + netif_set_gso_max_segs(dev, max_segs); status |= DO_SETLINK_MODIFIED; } } @@ -2779,11 +2778,11 @@ static int do_setlink(const struct sk_buff *skb, if (tb[IFLA_LINKMODE]) { unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); - write_lock_bh(&dev_base_lock); + write_lock(&dev_base_lock); if (dev->link_mode ^ value) status |= DO_SETLINK_NOTIFY; dev->link_mode = value; - write_unlock_bh(&dev_base_lock); + write_unlock(&dev_base_lock); } if (tb[IFLA_VFINFO_LIST]) { @@ -3222,7 +3221,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, if (tb[IFLA_GSO_MAX_SIZE]) netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); if (tb[IFLA_GSO_MAX_SEGS]) - dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); + netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS])); return dev; } diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index b5bc680d4755..9b8443774449 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -19,8 +19,8 @@ #include <linux/in6.h> #include <net/tcp.h> -static siphash_key_t net_secret __read_mostly; -static siphash_key_t ts_secret __read_mostly; +static siphash_aligned_key_t net_secret; +static siphash_aligned_key_t ts_secret; static __always_inline void net_secret_init(void) { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 909db87d7383..275f7b8416fe 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ 
-992,12 +992,10 @@ void napi_consume_skb(struct sk_buff *skb, int budget) } EXPORT_SYMBOL(napi_consume_skb); -/* Make sure a field is enclosed inside headers_start/headers_end section */ +/* Make sure a field is contained by headers group */ #define CHECK_SKB_FIELD(field) \ - BUILD_BUG_ON(offsetof(struct sk_buff, field) < \ - offsetof(struct sk_buff, headers_start)); \ - BUILD_BUG_ON(offsetof(struct sk_buff, field) > \ - offsetof(struct sk_buff, headers_end)); \ + BUILD_BUG_ON(offsetof(struct sk_buff, field) != \ + offsetof(struct sk_buff, headers.field)); \ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { @@ -1009,14 +1007,12 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) __skb_ext_copy(new, old); __nf_copy(new, old, false); - /* Note : this field could be in headers_start/headers_end section + /* Note : this field could be in the headers group. * It is not yet because we do not want to have a 16 bit hole */ new->queue_mapping = old->queue_mapping; - memcpy(&new->headers_start, &old->headers_start, - offsetof(struct sk_buff, headers_end) - - offsetof(struct sk_buff, headers_start)); + memcpy(&new->headers, &old->headers, sizeof(new->headers)); CHECK_SKB_FIELD(protocol); CHECK_SKB_FIELD(csum); CHECK_SKB_FIELD(hash); @@ -3919,32 +3915,6 @@ err_linearize: } EXPORT_SYMBOL_GPL(skb_segment_list); -int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) -{ - if (unlikely(p->len + skb->len >= 65536)) - return -E2BIG; - - if (NAPI_GRO_CB(p)->last == p) - skb_shinfo(p)->frag_list = skb; - else - NAPI_GRO_CB(p)->last->next = skb; - - skb_pull(skb, skb_gro_offset(skb)); - - NAPI_GRO_CB(p)->last = skb; - NAPI_GRO_CB(p)->count++; - p->data_len += skb->len; - - /* sk owenrship - if any - completely transferred to the aggregated packet */ - skb->destructor = NULL; - p->truesize += skb->truesize; - p->len += skb->len; - - NAPI_GRO_CB(skb)->same_flow = 1; - - return 0; -} - /** * skb_segment - Perform protocol segmentation on skb. 
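
Note: the CHECK_SKB_FIELD()/memcpy() rework above relies on the sk_buff header fields being wrapped in a named group (sk_buff now appears to use struct_group(headers, ...)), so __copy_skb_header() can copy sizeof(new->headers) instead of doing pointer arithmetic between two marker fields. A generic sketch of the idiom, using a hypothetical struct demo:

        #include <linux/stddef.h>       /* struct_group() */
        #include <linux/string.h>
        #include <linux/types.h>

        struct demo {
                u32 id;                         /* not part of the copied group */
                struct_group(hdrs,              /* members reachable as d->proto or d->hdrs.proto */
                        u16 proto;
                        u16 flags;
                        u32 hash;
                );
        };

        static void demo_copy_headers(struct demo *new, const struct demo *old)
        {
                /* one memcpy covers exactly the grouped fields; checks like
                 * BUILD_BUG_ON(offsetof(struct demo, flags) !=
                 *              offsetof(struct demo, hdrs.flags))
                 * keep the layout honest, as CHECK_SKB_FIELD() does above.
                 */
                memcpy(&new->hdrs, &old->hdrs, sizeof(new->hdrs));
        }
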
* @head_skb: buffer to segment @@ -4297,122 +4267,6 @@ err: } EXPORT_SYMBOL_GPL(skb_segment); -int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) -{ - struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); - unsigned int offset = skb_gro_offset(skb); - unsigned int headlen = skb_headlen(skb); - unsigned int len = skb_gro_len(skb); - unsigned int delta_truesize; - unsigned int new_truesize; - struct sk_buff *lp; - - if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) - return -E2BIG; - - lp = NAPI_GRO_CB(p)->last; - pinfo = skb_shinfo(lp); - - if (headlen <= offset) { - skb_frag_t *frag; - skb_frag_t *frag2; - int i = skbinfo->nr_frags; - int nr_frags = pinfo->nr_frags + i; - - if (nr_frags > MAX_SKB_FRAGS) - goto merge; - - offset -= headlen; - pinfo->nr_frags = nr_frags; - skbinfo->nr_frags = 0; - - frag = pinfo->frags + nr_frags; - frag2 = skbinfo->frags + i; - do { - *--frag = *--frag2; - } while (--i); - - skb_frag_off_add(frag, offset); - skb_frag_size_sub(frag, offset); - - /* all fragments truesize : remove (head size + sk_buff) */ - new_truesize = SKB_TRUESIZE(skb_end_offset(skb)); - delta_truesize = skb->truesize - new_truesize; - - skb->truesize = new_truesize; - skb->len -= skb->data_len; - skb->data_len = 0; - - NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; - goto done; - } else if (skb->head_frag) { - int nr_frags = pinfo->nr_frags; - skb_frag_t *frag = pinfo->frags + nr_frags; - struct page *page = virt_to_head_page(skb->head); - unsigned int first_size = headlen - offset; - unsigned int first_offset; - - if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) - goto merge; - - first_offset = skb->data - - (unsigned char *)page_address(page) + - offset; - - pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; - - __skb_frag_set_page(frag, page); - skb_frag_off_set(frag, first_offset); - skb_frag_size_set(frag, first_size); - - memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); - /* We dont need to clear skbinfo->nr_frags here */ - - new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff)); - delta_truesize = skb->truesize - new_truesize; - skb->truesize = new_truesize; - NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; - goto done; - } - -merge: - /* sk owenrship - if any - completely transferred to the aggregated packet */ - skb->destructor = NULL; - delta_truesize = skb->truesize; - if (offset > headlen) { - unsigned int eat = offset - headlen; - - skb_frag_off_add(&skbinfo->frags[0], eat); - skb_frag_size_sub(&skbinfo->frags[0], eat); - skb->data_len -= eat; - skb->len -= eat; - offset = headlen; - } - - __skb_pull(skb, offset); - - if (NAPI_GRO_CB(p)->last == p) - skb_shinfo(p)->frag_list = skb; - else - NAPI_GRO_CB(p)->last->next = skb; - NAPI_GRO_CB(p)->last = skb; - __skb_header_release(skb); - lp = p; - -done: - NAPI_GRO_CB(p)->count++; - p->data_len += len; - p->truesize += delta_truesize; - p->len += len; - if (lp != p) { - lp->data_len += len; - lp->truesize += delta_truesize; - lp->len += len; - } - NAPI_GRO_CB(skb)->same_flow = 1; - return 0; -} - #ifdef CONFIG_SKB_EXTENSIONS #define SKB_EXT_ALIGN_VALUE 8 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) @@ -4849,8 +4703,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb, serr->header.h4.iif = skb->dev ? 
skb->dev->ifindex : 0; if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { serr->ee.ee_data = skb_shinfo(skb)->tskey; - if (sk->sk_protocol == IPPROTO_TCP && - sk->sk_type == SOCK_STREAM) + if (sk_is_tcp(sk)) serr->ee.ee_data -= sk->sk_tskey; } @@ -4919,8 +4772,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, if (tsonly) { #ifdef CONFIG_INET if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && - sk->sk_protocol == IPPROTO_TCP && - sk->sk_type == SOCK_STREAM) { + sk_is_tcp(sk)) { skb = tcp_get_timestamping_opt_stats(sk, orig_skb, ack_skb); opt_stats = true; diff --git a/net/core/sock.c b/net/core/sock.c index 41e91d0f7061..e21485ab285d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -144,8 +144,6 @@ static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); -static void sock_inuse_add(struct net *net, int val); - /** * sk_ns_capable - General socket capability test * @sk: Socket to use a capability on or through @@ -327,7 +325,10 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); noreclaim_flag = memalloc_noreclaim_save(); - ret = sk->sk_backlog_rcv(sk, skb); + ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv, + tcp_v6_do_rcv, + tcp_v4_do_rcv, + sk, skb); memalloc_noreclaim_restore(noreclaim_flag); return ret; @@ -872,8 +873,7 @@ int sock_set_timestamping(struct sock *sk, int optname, if (val & SOF_TIMESTAMPING_OPT_ID && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { - if (sk->sk_protocol == IPPROTO_TCP && - sk->sk_type == SOCK_STREAM) { + if (sk_is_tcp(sk)) { if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) return -EINVAL; @@ -1135,6 +1135,7 @@ set_sndbuf: case SO_PRIORITY: if ((val >= 0 && val <= 6) || + ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) || ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) sk->sk_priority = val; else @@ -1280,7 +1281,8 @@ set_sndbuf: clear_bit(SOCK_PASSSEC, &sock->flags); break; case SO_MARK: - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && + !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { ret = -EPERM; break; } @@ -1370,8 +1372,7 @@ set_sndbuf: case SO_ZEROCOPY: if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) { - if (!((sk->sk_type == SOCK_STREAM && - sk->sk_protocol == IPPROTO_TCP) || + if (!(sk_is_tcp(sk) || (sk->sk_type == SOCK_DGRAM && sk->sk_protocol == IPPROTO_UDP))) ret = -ENOTSUPP; @@ -1982,7 +1983,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, sock_lock_init(sk); sk->sk_net_refcnt = kern ? 
0 : 1; if (likely(sk->sk_net_refcnt)) { - get_net(net); + get_net_track(net, &sk->ns_tracker, priority); sock_inuse_add(net, 1); } @@ -2038,7 +2039,7 @@ static void __sk_destruct(struct rcu_head *head) put_pid(sk->sk_peer_pid); if (likely(sk->sk_net_refcnt)) - put_net(sock_net(sk)); + put_net_track(sock_net(sk), &sk->ns_tracker); sk_prot_free(sk->sk_prot_creator, sk); } @@ -2125,7 +2126,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) /* SANITY */ if (likely(newsk->sk_net_refcnt)) { - get_net(sock_net(newsk)); + get_net_track(sock_net(newsk), &newsk->ns_tracker, priority); sock_inuse_add(sock_net(newsk), 1); } sk_node_init(&newsk->sk_node); @@ -2246,17 +2247,22 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) u32 max_segs = 1; sk_dst_set(sk, dst); - sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps; + sk->sk_route_caps = dst->dev->features; + if (sk_is_tcp(sk)) + sk->sk_route_caps |= NETIF_F_GSO; if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; - sk->sk_route_caps &= ~sk->sk_route_nocaps; + if (unlikely(sk->sk_gso_disabled)) + sk->sk_route_caps &= ~NETIF_F_GSO_MASK; if (sk_can_gso(sk)) { if (dst->header_len && !xfrm_dst_offload_ok(dst)) { sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; - sk->sk_gso_max_size = dst->dev->gso_max_size; - max_segs = max_t(u32, dst->dev->gso_max_segs, 1); + /* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */ + sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size); + /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */ + max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1); } } sk->sk_gso_max_segs = max_segs; @@ -3286,7 +3292,7 @@ void lock_sock_nested(struct sock *sk, int subclass) might_sleep(); spin_lock_bh(&sk->sk_lock.slock); - if (sk->sk_lock.owned) + if (sock_owned_by_user_nocheck(sk)) __lock_sock(sk); sk->sk_lock.owned = 1; spin_unlock_bh(&sk->sk_lock.slock); @@ -3317,7 +3323,7 @@ bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) might_sleep(); spin_lock_bh(&sk->sk_lock.slock); - if (!sk->sk_lock.owned) { + if (!sock_owned_by_user_nocheck(sk)) { /* * Fast path return with bottom halves disabled and * sock::sk_lock.slock held. 
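
Note: several hunks here and in skbuff.c replace the open-coded "sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM" test with sk_is_tcp(). The helper itself is not part of this excerpt; it is presumably a trivial inline along these lines (a sketch, not the exact upstream definition):

        static inline bool sk_is_tcp(const struct sock *sk)
        {
                return sk->sk_type == SOCK_STREAM &&
                       sk->sk_protocol == IPPROTO_TCP;
        }
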
@@ -3532,19 +3538,8 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem) } #ifdef CONFIG_PROC_FS -#define PROTO_INUSE_NR 64 /* should be enough for the first time */ -struct prot_inuse { - int val[PROTO_INUSE_NR]; -}; - static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); -void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) -{ - __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val); -} -EXPORT_SYMBOL_GPL(sock_prot_inuse_add); - int sock_prot_inuse_get(struct net *net, struct proto *prot) { int cpu, idx = prot->inuse_idx; @@ -3557,17 +3552,12 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot) } EXPORT_SYMBOL_GPL(sock_prot_inuse_get); -static void sock_inuse_add(struct net *net, int val) -{ - this_cpu_add(*net->core.sock_inuse, val); -} - int sock_inuse_get(struct net *net) { int cpu, res = 0; for_each_possible_cpu(cpu) - res += *per_cpu_ptr(net->core.sock_inuse, cpu); + res += per_cpu_ptr(net->core.prot_inuse, cpu)->all; return res; } @@ -3579,22 +3569,12 @@ static int __net_init sock_inuse_init_net(struct net *net) net->core.prot_inuse = alloc_percpu(struct prot_inuse); if (net->core.prot_inuse == NULL) return -ENOMEM; - - net->core.sock_inuse = alloc_percpu(int); - if (net->core.sock_inuse == NULL) - goto out; - return 0; - -out: - free_percpu(net->core.prot_inuse); - return -ENOMEM; } static void __net_exit sock_inuse_exit_net(struct net *net) { free_percpu(net->core.prot_inuse); - free_percpu(net->core.sock_inuse); } static struct pernet_operations net_inuse_ops = { @@ -3640,9 +3620,6 @@ static inline void release_proto_idx(struct proto *prot) { } -static void sock_inuse_add(struct net *net, int val) -{ -} #endif static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot) diff --git a/net/core/xdp.c b/net/core/xdp.c index 5ddc29f29bad..7fe1df85f505 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -159,6 +159,11 @@ static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq) int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, u32 queue_index, unsigned int napi_id) { + if (!dev) { + WARN(1, "Missing net_device from driver"); + return -ENODEV; + } + if (xdp_rxq->reg_state == REG_STATE_UNUSED) { WARN(1, "Driver promised not to register this"); return -EINVAL; @@ -169,11 +174,6 @@ int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, xdp_rxq_info_unreg(xdp_rxq); } - if (!dev) { - WARN(1, "Missing net_device from driver"); - return -ENODEV; - } - /* State either UNREGISTERED or NEW */ xdp_rxq_info_init(xdp_rxq); xdp_rxq->dev = dev; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index fc44dadc778b..a976b4d29892 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -238,17 +238,6 @@ void dccp_destroy_sock(struct sock *sk) EXPORT_SYMBOL_GPL(dccp_destroy_sock); -static inline int dccp_listen_start(struct sock *sk, int backlog) -{ - struct dccp_sock *dp = dccp_sk(sk); - - dp->dccps_role = DCCP_ROLE_LISTEN; - /* do not start to listen if feature negotiation setup fails */ - if (dccp_feat_finalise_settings(dp)) - return -EPROTO; - return inet_csk_listen_start(sk, backlog); -} - static inline int dccp_need_reset(int state) { return state != DCCP_CLOSED && state != DCCP_LISTEN && @@ -931,11 +920,17 @@ int inet_dccp_listen(struct socket *sock, int backlog) * we can only allow the backlog to be adjusted. 
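
Note: the sock.c hunks just above drop the separate net->core.sock_inuse per-cpu counter and fold it into struct prot_inuse, which sock_inuse_get() now reads through a per-cpu ->all field. The structure and the fast-path increments are presumably now defined in a header roughly as below; the field names come from the hunk, the inline helpers are a reconstruction:

        #define PROTO_INUSE_NR 64               /* presumably moved out of sock.c */

        struct prot_inuse {
                int all;                        /* every socket, any protocol */
                int val[PROTO_INUSE_NR];        /* per-protocol counts */
        };

        /* per-protocol accounting, previously an exported function in sock.c */
        static inline void sock_prot_inuse_add(const struct net *net,
                                               const struct proto *prot, int val)
        {
                this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
        }

        /* global socket accounting, previously net->core.sock_inuse */
        static inline void sock_inuse_add(const struct net *net, int val)
        {
                this_cpu_add(net->core.prot_inuse->all, val);
        }
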
*/ if (old_state != DCCP_LISTEN) { - /* - * FIXME: here it probably should be sk->sk_prot->listen_start - * see tcp_listen_start - */ - err = dccp_listen_start(sk, backlog); + struct dccp_sock *dp = dccp_sk(sk); + + dp->dccps_role = DCCP_ROLE_LISTEN; + + /* do not start to listen if feature negotiation setup fails */ + if (dccp_feat_finalise_settings(dp)) { + err = -EPROTO; + goto out; + } + + err = inet_csk_listen_start(sk); if (err) goto out; } diff --git a/net/dccp/trace.h b/net/dccp/trace.h index 5062421beee9..5a43b3508c7f 100644 --- a/net/dccp/trace.h +++ b/net/dccp/trace.h @@ -60,9 +60,7 @@ TRACE_EVENT(dccp_probe, __entry->tx_t_ipi = hc->tx_t_ipi; } else { __entry->tx_s = 0; - memset(&__entry->tx_rtt, 0, (void *)&__entry->tx_t_ipi - - (void *)&__entry->tx_rtt + - sizeof(__entry->tx_t_ipi)); + memset_startat(__entry, 0, tx_rtt); } ), diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 4a4e3c17740c..ee73057529cf 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -101,10 +101,6 @@ errout: return err; } -static const struct nla_policy dn_fib_rule_policy[FRA_MAX+1] = { - FRA_GENERIC_POLICY, -}; - static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) { struct dn_fib_rule *r = (struct dn_fib_rule *)rule; @@ -235,7 +231,6 @@ static const struct fib_rules_ops __net_initconst dn_fib_rules_ops_template = { .fill = dn_fib_rule_fill, .flush_cache = dn_fib_rule_flush_cache, .nlgroup = RTNLGRP_DECnet_RULE, - .policy = dn_fib_rule_policy, .owner = THIS_MODULE, .fro_net = &init_net, }; diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index ea5169e671ae..d9d0d227092c 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -406,7 +406,7 @@ EXPORT_SYMBOL_GPL(dsa_devlink_resource_register); void dsa_devlink_resources_unregister(struct dsa_switch *ds) { - devlink_resources_unregister(ds->devlink, NULL); + devlink_resources_unregister(ds->devlink); } EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 826957b6442b..c18b22c0bf55 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -129,35 +129,52 @@ void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag) } } +struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst, + const struct net_device *br) +{ + struct dsa_port *dp; + + list_for_each_entry(dp, &dst->ports, list) + if (dsa_port_bridge_dev_get(dp) == br) + return dp->bridge; + + return NULL; +} + static int dsa_bridge_num_find(const struct net_device *bridge_dev) { struct dsa_switch_tree *dst; - struct dsa_port *dp; - /* When preparing the offload for a port, it will have a valid - * dp->bridge_dev pointer but a not yet valid dp->bridge_num. - * However there might be other ports having the same dp->bridge_dev - * and a valid dp->bridge_num, so just ignore this port. 
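
Note: the dccp trace hunk above replaces a hand-computed memset() range with memset_startat(), which zeroes from the named member through the end of the enclosing object and lets the compiler do the offset math. A small usage sketch with a hypothetical struct:

        #include <linux/string.h>       /* memset_startat() */
        #include <linux/types.h>

        struct my_stats {
                u32 id;                 /* preserved */
                u64 tx_rtt;             /* cleared from here ... */
                u64 tx_p;
                u64 tx_t_ipi;           /* ... through the end of the struct */
        };

        static void my_stats_reset(struct my_stats *s)
        {
                /* equivalent to memset(&s->tx_rtt, 0,
                 *        sizeof(*s) - offsetof(struct my_stats, tx_rtt)) */
                memset_startat(s, 0, tx_rtt);
        }
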
- */ - list_for_each_entry(dst, &dsa_tree_list, list) - list_for_each_entry(dp, &dst->ports, list) - if (dp->bridge_dev == bridge_dev && - dp->bridge_num != -1) - return dp->bridge_num; + list_for_each_entry(dst, &dsa_tree_list, list) { + struct dsa_bridge *bridge; + + bridge = dsa_tree_bridge_find(dst, bridge_dev); + if (bridge) + return bridge->num; + } - return -1; + return 0; } -int dsa_bridge_num_get(const struct net_device *bridge_dev, int max) +unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max) { - int bridge_num = dsa_bridge_num_find(bridge_dev); + unsigned int bridge_num = dsa_bridge_num_find(bridge_dev); + + /* Switches without FDB isolation support don't get unique + * bridge numbering + */ + if (!max) + return 0; - if (bridge_num < 0) { - /* First port that offloads TX forwarding for this bridge */ - bridge_num = find_first_zero_bit(&dsa_fwd_offloading_bridges, - DSA_MAX_NUM_OFFLOADING_BRIDGES); + if (!bridge_num) { + /* First port that requests FDB isolation or TX forwarding + * offload for this bridge + */ + bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges, + DSA_MAX_NUM_OFFLOADING_BRIDGES, + 1); if (bridge_num >= max) - return -1; + return 0; set_bit(bridge_num, &dsa_fwd_offloading_bridges); } @@ -165,13 +182,14 @@ int dsa_bridge_num_get(const struct net_device *bridge_dev, int max) return bridge_num; } -void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num) +void dsa_bridge_num_put(const struct net_device *bridge_dev, + unsigned int bridge_num) { - /* Check if the bridge is still in use, otherwise it is time - * to clean it up so we can reuse this bridge_num later. + /* Since we refcount bridges, we know that when we call this function + * it is no longer in use, so we can just go ahead and remove it from + * the bit mask. 
*/ - if (dsa_bridge_num_find(bridge_dev) < 0) - clear_bit(bridge_num, &dsa_fwd_offloading_bridges); + clear_bit(bridge_num, &dsa_fwd_offloading_bridges); } struct dsa_switch *dsa_switch_find(int tree_index, int sw_index) @@ -804,7 +822,7 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds) int err; if (tag_ops->proto == dst->default_proto) - return 0; + goto connect; dsa_switch_for_each_cpu_port(cpu_dp, ds) { rtnl_lock(); @@ -818,7 +836,30 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds) } } +connect: + if (tag_ops->connect) { + err = tag_ops->connect(ds); + if (err) + return err; + } + + if (ds->ops->connect_tag_protocol) { + err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); + if (err) { + dev_err(ds->dev, + "Unable to connect to tag protocol \"%s\": %pe\n", + tag_ops->name, ERR_PTR(err)); + goto disconnect; + } + } + return 0; + +disconnect: + if (tag_ops->disconnect) + tag_ops->disconnect(ds); + + return err; } static int dsa_switch_setup(struct dsa_switch *ds) @@ -1118,6 +1159,37 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst) dst->setup = false; } +static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst, + const struct dsa_device_ops *tag_ops) +{ + const struct dsa_device_ops *old_tag_ops = dst->tag_ops; + struct dsa_notifier_tag_proto_info info; + int err; + + dst->tag_ops = tag_ops; + + /* Notify the switches from this tree about the connection + * to the new tagger + */ + info.tag_ops = tag_ops; + err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info); + if (err && err != -EOPNOTSUPP) + goto out_disconnect; + + /* Notify the old tagger about the disconnection from this tree */ + info.tag_ops = old_tag_ops; + dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info); + + return 0; + +out_disconnect: + info.tag_ops = tag_ops; + dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info); + dst->tag_ops = old_tag_ops; + + return err; +} + /* Since the dsa/tagging sysfs device attribute is per master, the assumption * is that all DSA switches within a tree share the same tagger, otherwise * they would have formed disjoint trees (different "dsa,member" values). 
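
Note: dsa_tree_bind_tag_proto() and the new DSA_NOTIFIER_TAG_PROTO_CONNECT/DISCONNECT events let a tagging protocol driver keep per-switch state in ds->tagger_data. A tagger that wants this would implement the new connect/disconnect hooks roughly as below; the my_tagger_* names are invented, while the .connect/.disconnect members of dsa_device_ops are the ones this series introduces:

        struct my_tagger_data {
                spinlock_t lock;        /* whatever per-switch state the tagger needs */
        };

        static int my_tagger_connect(struct dsa_switch *ds)
        {
                struct my_tagger_data *td;

                td = kzalloc(sizeof(*td), GFP_KERNEL);
                if (!td)
                        return -ENOMEM;

                spin_lock_init(&td->lock);
                ds->tagger_data = td;   /* owned by the tagger side */
                return 0;
        }

        static void my_tagger_disconnect(struct dsa_switch *ds)
        {
                kfree(ds->tagger_data);
                ds->tagger_data = NULL;
        }

        static const struct dsa_device_ops my_tagger_netdev_ops = {
                .name           = "my-tagger",
                /* .proto, .xmit, .rcv etc. elided */
                .connect        = my_tagger_connect,
                .disconnect     = my_tagger_disconnect,
        };
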
@@ -1150,12 +1222,15 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, goto out_unlock; } + /* Notify the tag protocol change */ info.tag_ops = tag_ops; err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info); if (err) - goto out_unwind_tagger; + return err; - dst->tag_ops = tag_ops; + err = dsa_tree_bind_tag_proto(dst, tag_ops); + if (err) + goto out_unwind_tagger; rtnl_unlock(); @@ -1184,7 +1259,6 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index) dp->ds = ds; dp->index = index; - dp->bridge_num = -1; INIT_LIST_HEAD(&dp->list); list_add_tail(&dp->list, &dst->ports); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index a5c9bc7b66c6..edfaae7b5967 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -37,6 +37,8 @@ enum { DSA_NOTIFIER_VLAN_DEL, DSA_NOTIFIER_MTU, DSA_NOTIFIER_TAG_PROTO, + DSA_NOTIFIER_TAG_PROTO_CONNECT, + DSA_NOTIFIER_TAG_PROTO_DISCONNECT, DSA_NOTIFIER_MRP_ADD, DSA_NOTIFIER_MRP_DEL, DSA_NOTIFIER_MRP_ADD_RING_ROLE, @@ -52,10 +54,11 @@ struct dsa_notifier_ageing_time_info { /* DSA_NOTIFIER_BRIDGE_* */ struct dsa_notifier_bridge_info { - struct net_device *br; + struct dsa_bridge bridge; int tree_index; int sw_index; int port; + bool tx_fwd_offload; }; /* DSA_NOTIFIER_FDB_* */ @@ -258,54 +261,13 @@ int dsa_port_mrp_add_ring_role(const struct dsa_port *dp, const struct switchdev_obj_ring_role_mrp *mrp); int dsa_port_mrp_del_ring_role(const struct dsa_port *dp, const struct switchdev_obj_ring_role_mrp *mrp); +int dsa_port_phylink_create(struct dsa_port *dp); int dsa_port_link_register_of(struct dsa_port *dp); void dsa_port_link_unregister_of(struct dsa_port *dp); int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr); void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr); int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast); void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast); -extern const struct phylink_mac_ops dsa_port_phylink_mac_ops; - -static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp, - const struct net_device *dev) -{ - return dsa_port_to_bridge_port(dp) == dev; -} - -static inline bool dsa_port_offloads_bridge(struct dsa_port *dp, - const struct net_device *bridge_dev) -{ - /* DSA ports connected to a bridge, and event was emitted - * for the bridge. 
- */ - return dp->bridge_dev == bridge_dev; -} - -/* Returns true if any port of this tree offloads the given net_device */ -static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst, - const struct net_device *dev) -{ - struct dsa_port *dp; - - list_for_each_entry(dp, &dst->ports, list) - if (dsa_port_offloads_bridge_port(dp, dev)) - return true; - - return false; -} - -/* Returns true if any port of this tree offloads the given bridge */ -static inline bool dsa_tree_offloads_bridge(struct dsa_switch_tree *dst, - const struct net_device *bridge_dev) -{ - struct dsa_port *dp; - - list_for_each_entry(dp, &dst->ports, list) - if (dsa_port_offloads_bridge(dp, bridge_dev)) - return true; - - return false; -} /* slave.c */ extern const struct dsa_device_ops notag_netdev_ops; @@ -345,7 +307,7 @@ dsa_slave_to_master(const struct net_device *dev) static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb) { struct dsa_port *dp = dsa_slave_to_port(skb->dev); - struct net_device *br = dp->bridge_dev; + struct net_device *br = dsa_port_bridge_dev_get(dp); struct net_device *dev = skb->dev; struct net_device *upper_dev; u16 vid, pvid, proto; @@ -415,7 +377,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid) if (dp->type != DSA_PORT_TYPE_USER) continue; - if (!dp->bridge_dev) + if (!dp->bridge) continue; if (dp->stp_state != BR_STATE_LEARNING && @@ -444,7 +406,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid) /* If the ingress port offloads the bridge, we mark the frame as autonomously * forwarded by hardware, so the software bridge doesn't forward in twice, back * to us, because we already did. However, if we're in fallback mode and we do - * software bridging, we are not offloading it, therefore the dp->bridge_dev + * software bridging, we are not offloading it, therefore the dp->bridge * pointer is not populated, and flooding needs to be done by software (we are * effectively operating in standalone ports mode). */ @@ -452,7 +414,7 @@ static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb) { struct dsa_port *dp = dsa_slave_to_port(skb->dev); - skb->offload_fwd_mark = !!(dp->bridge_dev); + skb->offload_fwd_mark = !!(dp->bridge); } /* Helper for removing DSA header tags from packets in the RX path. 
@@ -546,8 +508,11 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, struct net_device *master, const struct dsa_device_ops *tag_ops, const struct dsa_device_ops *old_tag_ops); -int dsa_bridge_num_get(const struct net_device *bridge_dev, int max); -void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num); +unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max); +void dsa_bridge_num_put(const struct net_device *bridge_dev, + unsigned int bridge_num); +struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst, + const struct net_device *br); /* tag_8021q.c */ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, diff --git a/net/dsa/port.c b/net/dsa/port.c index f6f12ad2b525..05677e016982 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -130,7 +130,7 @@ int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy) return err; } - if (!dp->bridge_dev) + if (!dp->bridge) dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false); if (dp->pl) @@ -158,7 +158,7 @@ void dsa_port_disable_rt(struct dsa_port *dp) if (dp->pl) phylink_stop(dp->pl); - if (!dp->bridge_dev) + if (!dp->bridge) dsa_port_set_state_now(dp, BR_STATE_DISABLED, false); if (ds->ops->port_disable) @@ -221,7 +221,7 @@ static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp, struct netlink_ext_ack *extack) { struct net_device *brport_dev = dsa_port_to_bridge_port(dp); - struct net_device *br = dp->bridge_dev; + struct net_device *br = dsa_port_bridge_dev_get(dp); int err; err = dsa_port_inherit_brport_flags(dp, extack); @@ -270,52 +270,55 @@ static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp) */ } -static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp, - struct net_device *bridge_dev) +static int dsa_port_bridge_create(struct dsa_port *dp, + struct net_device *br, + struct netlink_ext_ack *extack) { - int bridge_num = dp->bridge_num; struct dsa_switch *ds = dp->ds; + struct dsa_bridge *bridge; - /* No bridge TX forwarding offload => do nothing */ - if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1) - return; + bridge = dsa_tree_bridge_find(ds->dst, br); + if (bridge) { + refcount_inc(&bridge->refcount); + dp->bridge = bridge; + return 0; + } - dp->bridge_num = -1; + bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); + if (!bridge) + return -ENOMEM; - dsa_bridge_num_put(bridge_dev, bridge_num); + refcount_set(&bridge->refcount, 1); - /* Notify the chips only once the offload has been deactivated, so - * that they can update their configuration accordingly. 
- */ - ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev, - bridge_num); + bridge->dev = br; + + bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges); + if (ds->max_num_bridges && !bridge->num) { + NL_SET_ERR_MSG_MOD(extack, + "Range of offloadable bridges exceeded"); + kfree(bridge); + return -EOPNOTSUPP; + } + + dp->bridge = bridge; + + return 0; } -static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp, - struct net_device *bridge_dev) +static void dsa_port_bridge_destroy(struct dsa_port *dp, + const struct net_device *br) { - struct dsa_switch *ds = dp->ds; - int bridge_num, err; + struct dsa_bridge *bridge = dp->bridge; - if (!ds->ops->port_bridge_tx_fwd_offload) - return false; + dp->bridge = NULL; - bridge_num = dsa_bridge_num_get(bridge_dev, - ds->num_fwd_offloading_bridges); - if (bridge_num < 0) - return false; + if (!refcount_dec_and_test(&bridge->refcount)) + return; - dp->bridge_num = bridge_num; + if (bridge->num) + dsa_bridge_num_put(br, bridge->num); - /* Notify the driver */ - err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev, - bridge_num); - if (err) { - dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev); - return false; - } - - return true; + kfree(bridge); } int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br, @@ -325,30 +328,32 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br, .tree_index = dp->ds->dst->index, .sw_index = dp->ds->index, .port = dp->index, - .br = br, }; struct net_device *dev = dp->slave; struct net_device *brport_dev; - bool tx_fwd_offload; int err; /* Here the interface is already bridged. Reflect the current * configuration so that drivers can program their chips accordingly. */ - dp->bridge_dev = br; + err = dsa_port_bridge_create(dp, br, extack); + if (err) + return err; brport_dev = dsa_port_to_bridge_port(dp); + info.bridge = *dp->bridge; err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info); if (err) goto out_rollback; - tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br); + /* Drivers which support bridge TX forwarding should set this */ + dp->bridge->tx_fwd_offload = info.tx_fwd_offload; err = switchdev_bridge_port_offload(brport_dev, dev, dp, &dsa_slave_switchdev_notifier, &dsa_slave_switchdev_blocking_notifier, - tx_fwd_offload, extack); + dp->bridge->tx_fwd_offload, extack); if (err) goto out_rollback_unbridge; @@ -365,7 +370,7 @@ out_rollback_unoffload: out_rollback_unbridge: dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); out_rollback: - dp->bridge_dev = NULL; + dsa_port_bridge_destroy(dp, br); return err; } @@ -390,16 +395,14 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br) .tree_index = dp->ds->dst->index, .sw_index = dp->ds->index, .port = dp->index, - .br = br, + .bridge = *dp->bridge, }; int err; /* Here the port is already unbridged. Reflect the current configuration * so that drivers can program their chips accordingly. 
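
Note: dsa_port_bridge_create()/dsa_port_bridge_destroy() replace the per-port bridge_dev/bridge_num pair with a single refcounted object shared by every port of the tree that joined the same bridge. From the fields used in these hunks, the object and the accessor used throughout the conversion presumably look like this (a sketch; the real definitions live in include/net/dsa.h):

        struct dsa_bridge {
                struct net_device *dev;         /* the bridge master device */
                unsigned int num;               /* 0 = no TX forwarding offload / isolation */
                bool tx_fwd_offload;            /* set by the driver at join time */
                refcount_t refcount;            /* one reference per joined dsa_port */
        };

        /* presumed shape of the accessor replacing dp->bridge_dev */
        static inline struct net_device *
        dsa_port_bridge_dev_get(const struct dsa_port *dp)
        {
                return dp->bridge ? dp->bridge->dev : NULL;
        }
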
*/ - dp->bridge_dev = NULL; - - dsa_port_bridge_tx_fwd_unoffload(dp, br); + dsa_port_bridge_destroy(dp, br); err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); if (err) @@ -477,12 +480,15 @@ err_lag_join: void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag) { - if (dp->bridge_dev) - dsa_port_pre_bridge_leave(dp, dp->bridge_dev); + struct net_device *br = dsa_port_bridge_dev_get(dp); + + if (br) + dsa_port_pre_bridge_leave(dp, br); } void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag) { + struct net_device *br = dsa_port_bridge_dev_get(dp); struct dsa_notifier_lag_info info = { .sw_index = dp->ds->index, .port = dp->index, @@ -496,8 +502,8 @@ void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag) /* Port might have been part of a LAG that in turn was * attached to a bridge. */ - if (dp->bridge_dev) - dsa_port_bridge_leave(dp, dp->bridge_dev); + if (br) + dsa_port_bridge_leave(dp, br); dp->lag_tx_enabled = false; dp->lag_dev = NULL; @@ -526,8 +532,8 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp, * as long as we have 8021q uppers. */ if (vlan_filtering && dsa_port_is_user(dp)) { + struct net_device *br = dsa_port_bridge_dev_get(dp); struct net_device *upper_dev, *slave = dp->slave; - struct net_device *br = dp->bridge_dev; struct list_head *iter; netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) { @@ -561,17 +567,15 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp, * different setting than what is being requested. */ dsa_switch_for_each_port(other_dp, ds) { - struct net_device *other_bridge; + struct net_device *other_br = dsa_port_bridge_dev_get(other_dp); - other_bridge = other_dp->bridge_dev; - if (!other_bridge) - continue; /* If it's the same bridge, it also has same * vlan_filtering setting => no need to check */ - if (other_bridge == dp->bridge_dev) + if (!other_br || other_br == dsa_port_bridge_dev_get(dp)) continue; - if (br_vlan_enabled(other_bridge) != vlan_filtering) { + + if (br_vlan_enabled(other_br) != vlan_filtering) { NL_SET_ERR_MSG_MOD(extack, "VLAN filtering is a global setting"); return false; @@ -655,13 +659,13 @@ restore: */ bool dsa_port_skip_vlan_configuration(struct dsa_port *dp) { + struct net_device *br = dsa_port_bridge_dev_get(dp); struct dsa_switch *ds = dp->ds; - if (!dp->bridge_dev) + if (!br) return false; - return (!ds->configure_vlan_while_not_filtering && - !br_vlan_enabled(dp->bridge_dev)); + return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br); } int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock) @@ -981,8 +985,11 @@ static void dsa_port_phylink_validate(struct phylink_config *config, struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); struct dsa_switch *ds = dp->ds; - if (!ds->ops->phylink_validate) + if (!ds->ops->phylink_validate) { + if (config->mac_capabilities) + phylink_generic_validate(config, supported, state); return; + } ds->ops->phylink_validate(ds, dp->index, supported, state); } @@ -1072,7 +1079,7 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config, speed, duplex, tx_pause, rx_pause); } -const struct phylink_mac_ops dsa_port_phylink_mac_ops = { +static const struct phylink_mac_ops dsa_port_phylink_mac_ops = { .validate = dsa_port_phylink_validate, .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state, .mac_config = dsa_port_phylink_mac_config, @@ -1081,6 +1088,36 @@ const struct phylink_mac_ops dsa_port_phylink_mac_ops = { .mac_link_up = 
dsa_port_phylink_mac_link_up, }; +int dsa_port_phylink_create(struct dsa_port *dp) +{ + struct dsa_switch *ds = dp->ds; + phy_interface_t mode; + int err; + + err = of_get_phy_mode(dp->dn, &mode); + if (err) + mode = PHY_INTERFACE_MODE_NA; + + /* Presence of phylink_mac_link_state or phylink_mac_an_restart is + * an indicator of a legacy phylink driver. + */ + if (ds->ops->phylink_mac_link_state || + ds->ops->phylink_mac_an_restart) + dp->pl_config.legacy_pre_march2020 = true; + + if (ds->ops->phylink_get_caps) + ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config); + + dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn), + mode, &dsa_port_phylink_mac_ops); + if (IS_ERR(dp->pl)) { + pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); + return PTR_ERR(dp->pl); + } + + return 0; +} + static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable) { struct dsa_switch *ds = dp->ds; @@ -1157,27 +1194,15 @@ static int dsa_port_phylink_register(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; struct device_node *port_dn = dp->dn; - phy_interface_t mode; int err; - err = of_get_phy_mode(port_dn, &mode); - if (err) - mode = PHY_INTERFACE_MODE_NA; - dp->pl_config.dev = ds->dev; dp->pl_config.type = PHYLINK_DEV; dp->pl_config.pcs_poll = ds->pcs_poll; - if (ds->ops->phylink_get_interfaces) - ds->ops->phylink_get_interfaces(ds, dp->index, - dp->pl_config.supported_interfaces); - - dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), - mode, &dsa_port_phylink_mac_ops); - if (IS_ERR(dp->pl)) { - pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); - return PTR_ERR(dp->pl); - } + err = dsa_port_phylink_create(dp); + if (err) + return err; err = phylink_of_phy_connect(dp->pl, port_dn, 0); if (err && err != -ENODEV) { diff --git a/net/dsa/slave.c b/net/dsa/slave.c index ad61f6bc8886..88f7b8686dac 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -289,14 +289,14 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx, ret = dsa_port_set_state(dp, attr->u.stp_state, true); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - if (!dsa_port_offloads_bridge(dp, attr->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) return -EOPNOTSUPP; ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering, extack); break; case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - if (!dsa_port_offloads_bridge(dp, attr->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) return -EOPNOTSUPP; ret = dsa_port_ageing_time(dp, attr->u.ageing_time); @@ -363,7 +363,7 @@ static int dsa_slave_vlan_add(struct net_device *dev, /* Deny adding a bridge VLAN when there is already an 802.1Q upper with * the same VID. 
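
Note: dsa_port_phylink_create() now asks the switch driver for its MAC capabilities via phylink_get_caps(), and dsa_port_phylink_validate() falls back to phylink_generic_validate() when mac_capabilities is filled in, so drivers no longer need a bespoke .phylink_validate(). A hypothetical driver hook might look like this (gigabit, RGMII-only port; every name other than the phylink/DSA symbols is invented):

        static void my_switch_phylink_get_caps(struct dsa_switch *ds, int port,
                                               struct phylink_config *config)
        {
                /* which MII/SERDES modes this port can use */
                __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);

                /* what the MAC itself supports; with this filled in,
                 * phylink_generic_validate() does the rest.
                 */
                config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
                                           MAC_10 | MAC_100 | MAC_1000FD;
        }

        static const struct dsa_switch_ops my_switch_ops = {
                /* ... existing callbacks ... */
                .phylink_get_caps = my_switch_phylink_get_caps,
        };
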
*/ - if (br_vlan_enabled(dp->bridge_dev)) { + if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) { rcu_read_lock(); err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan); rcu_read_unlock(); @@ -409,7 +409,7 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx, err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); break; case SWITCHDEV_OBJ_ID_HOST_MDB: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); @@ -421,13 +421,13 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx, err = dsa_slave_vlan_add(dev, obj, extack); break; case SWITCHDEV_OBJ_ID_MRP: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj)); break; case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_mrp_add_ring_role(dp, @@ -483,7 +483,7 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx, err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); break; case SWITCHDEV_OBJ_ID_HOST_MDB: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); @@ -495,13 +495,13 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx, err = dsa_slave_vlan_del(dev, obj); break; case SWITCHDEV_OBJ_ID_MRP: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj)); break; case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_mrp_del_ring_role(dp, @@ -1564,7 +1564,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp) if (!dp->ds->mtu_enforcement_ingress) return; - if (!dp->bridge_dev) + if (!dp->bridge) return; INIT_LIST_HEAD(&hw_port_list); @@ -1580,7 +1580,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp) if (other_dp->type != DSA_PORT_TYPE_USER) continue; - if (other_dp->bridge_dev != dp->bridge_dev) + if (!dsa_port_bridge_same(dp, other_dp)) continue; if (!other_dp->ds->mtu_enforcement_ingress) @@ -1851,14 +1851,9 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev) struct dsa_port *dp = dsa_slave_to_port(slave_dev); struct device_node *port_dn = dp->dn; struct dsa_switch *ds = dp->ds; - phy_interface_t mode; u32 phy_flags = 0; int ret; - ret = of_get_phy_mode(port_dn, &mode); - if (ret) - mode = PHY_INTERFACE_MODE_NA; - dp->pl_config.dev = &slave_dev->dev; dp->pl_config.type = PHYLINK_NETDEV; @@ -1871,17 +1866,9 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev) dp->pl_config.poll_fixed_state = true; } - if (ds->ops->phylink_get_interfaces) - ds->ops->phylink_get_interfaces(ds, dp->index, - dp->pl_config.supported_interfaces); - - dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode, - &dsa_port_phylink_mac_ops); - if (IS_ERR(dp->pl)) { - netdev_err(slave_dev, - "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); - return PTR_ERR(dp->pl); - } + ret = dsa_port_phylink_create(dp); + if (ret) + return ret; if 
(ds->ops->get_phy_flags) phy_flags = ds->ops->get_phy_flags(ds, dp->index); @@ -2233,7 +2220,7 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev, struct netdev_notifier_changeupper_info *info) { struct netlink_ext_ack *ext_ack; - struct net_device *slave; + struct net_device *slave, *br; struct dsa_port *dp; ext_ack = netdev_notifier_info_to_extack(&info->info); @@ -2246,11 +2233,12 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev, return NOTIFY_DONE; dp = dsa_slave_to_port(slave); - if (!dp->bridge_dev) + br = dsa_port_bridge_dev_get(dp); + if (!br) return NOTIFY_DONE; /* Deny enslaving a VLAN device into a VLAN-aware bridge */ - if (br_vlan_enabled(dp->bridge_dev) && + if (br_vlan_enabled(br) && netif_is_bridge_master(info->upper_dev) && info->linking) { NL_SET_ERR_MSG_MOD(ext_ack, "Cannot enslave VLAN device into VLAN aware bridge"); @@ -2265,7 +2253,7 @@ dsa_slave_check_8021q_upper(struct net_device *dev, struct netdev_notifier_changeupper_info *info) { struct dsa_port *dp = dsa_slave_to_port(dev); - struct net_device *br = dp->bridge_dev; + struct net_device *br = dsa_port_bridge_dev_get(dp); struct bridge_vlan_info br_info; struct netlink_ext_ack *extack; int err = NOTIFY_DONE; @@ -2462,7 +2450,7 @@ static bool dsa_foreign_dev_check(const struct net_device *dev, struct dsa_switch_tree *dst = dp->ds->dst; if (netif_is_bridge_master(foreign_dev)) - return !dsa_tree_offloads_bridge(dst, foreign_dev); + return !dsa_tree_offloads_bridge_dev(dst, foreign_dev); if (netif_is_bridge_port(foreign_dev)) return !dsa_tree_offloads_bridge_port(dst, foreign_dev); diff --git a/net/dsa/switch.c b/net/dsa/switch.c index bb155a16d454..393f2d8a860a 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -95,7 +95,8 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds, if (!ds->ops->port_bridge_join) return -EOPNOTSUPP; - err = ds->ops->port_bridge_join(ds, info->port, info->br); + err = ds->ops->port_bridge_join(ds, info->port, info->bridge, + &info->tx_fwd_offload); if (err) return err; } @@ -104,7 +105,7 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds, ds->ops->crosschip_bridge_join) { err = ds->ops->crosschip_bridge_join(ds, info->tree_index, info->sw_index, - info->port, info->br); + info->port, info->bridge); if (err) return err; } @@ -124,19 +125,20 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, if (dst->index == info->tree_index && ds->index == info->sw_index && ds->ops->port_bridge_leave) - ds->ops->port_bridge_leave(ds, info->port, info->br); + ds->ops->port_bridge_leave(ds, info->port, info->bridge); if ((dst->index != info->tree_index || ds->index != info->sw_index) && ds->ops->crosschip_bridge_leave) ds->ops->crosschip_bridge_leave(ds, info->tree_index, info->sw_index, info->port, - info->br); + info->bridge); - if (ds->needs_standalone_vlan_filtering && !br_vlan_enabled(info->br)) { + if (ds->needs_standalone_vlan_filtering && + !br_vlan_enabled(info->bridge.dev)) { change_vlan_filtering = true; vlan_filtering = true; } else if (!ds->needs_standalone_vlan_filtering && - br_vlan_enabled(info->br)) { + br_vlan_enabled(info->bridge.dev)) { change_vlan_filtering = true; vlan_filtering = false; } @@ -151,11 +153,9 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, */ if (change_vlan_filtering && ds->vlan_filtering_is_global) { dsa_switch_for_each_port(dp, ds) { - struct net_device *bridge_dev; + struct net_device *br = dsa_port_bridge_dev_get(dp); - bridge_dev = dp->bridge_dev; - - if (bridge_dev && br_vlan_enabled(bridge_dev)) { + if 
(br && br_vlan_enabled(br)) { change_vlan_filtering = false; break; } @@ -647,6 +647,60 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds, return 0; } +/* We use the same cross-chip notifiers to inform both the tagger side, as well + * as the switch side, of connection and disconnection events. + * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the + * switch side doesn't support connecting to this tagger, and therefore, the + * fact that we don't disconnect the tagger side doesn't constitute a memory + * leak: the tagger will still operate with persistent per-switch memory, just + * with the switch side unconnected to it. What does constitute a hard error is + * when the switch side supports connecting but fails. + */ +static int +dsa_switch_connect_tag_proto(struct dsa_switch *ds, + struct dsa_notifier_tag_proto_info *info) +{ + const struct dsa_device_ops *tag_ops = info->tag_ops; + int err; + + /* Notify the new tagger about the connection to this switch */ + if (tag_ops->connect) { + err = tag_ops->connect(ds); + if (err) + return err; + } + + if (!ds->ops->connect_tag_protocol) + return -EOPNOTSUPP; + + /* Notify the switch about the connection to the new tagger */ + err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); + if (err) { + /* Revert the new tagger's connection to this tree */ + if (tag_ops->disconnect) + tag_ops->disconnect(ds); + return err; + } + + return 0; +} + +static int +dsa_switch_disconnect_tag_proto(struct dsa_switch *ds, + struct dsa_notifier_tag_proto_info *info) +{ + const struct dsa_device_ops *tag_ops = info->tag_ops; + + /* Notify the tagger about the disconnection from this switch */ + if (tag_ops->disconnect && ds->tagger_data) + tag_ops->disconnect(ds); + + /* No need to notify the switch, since it shouldn't have any + * resources to tear down + */ + return 0; +} + static int dsa_switch_mrp_add(struct dsa_switch *ds, struct dsa_notifier_mrp_info *info) { @@ -766,6 +820,12 @@ static int dsa_switch_event(struct notifier_block *nb, case DSA_NOTIFIER_TAG_PROTO: err = dsa_switch_change_tag_proto(ds, info); break; + case DSA_NOTIFIER_TAG_PROTO_CONNECT: + err = dsa_switch_connect_tag_proto(ds, info); + break; + case DSA_NOTIFIER_TAG_PROTO_DISCONNECT: + err = dsa_switch_disconnect_tag_proto(ds, info); + break; case DSA_NOTIFIER_MRP_ADD: err = dsa_switch_mrp_add(ds, info); break; diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 72cac2c0af7b..27712a81c967 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -67,10 +67,12 @@ #define DSA_8021Q_PORT(x) (((x) << DSA_8021Q_PORT_SHIFT) & \ DSA_8021Q_PORT_MASK) -u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num) +u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num) { - /* The VBID value of 0 is reserved for precise TX */ - return DSA_8021Q_DIR_TX | DSA_8021Q_VBID(bridge_num + 1); + /* The VBID value of 0 is reserved for precise TX, but it is also + * reserved/invalid for the bridge_num, so all is well. 
+ */ + return DSA_8021Q_DIR_TX | DSA_8021Q_VBID(bridge_num); } EXPORT_SYMBOL_GPL(dsa_8021q_bridge_tx_fwd_offload_vid); @@ -335,7 +337,7 @@ dsa_port_tag_8021q_bridge_match(struct dsa_port *dp, return false; if (dsa_port_is_user(dp)) - return dp->bridge_dev == info->br; + return dsa_port_offloads_bridge(dp, &info->bridge); return false; } @@ -408,10 +410,9 @@ int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, } int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, - struct net_device *br, - int bridge_num) + struct dsa_bridge bridge) { - u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); + u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); return dsa_port_tag_8021q_vlan_add(dsa_to_port(ds, port), tx_vid, true); @@ -419,10 +420,9 @@ int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_offload); void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, - struct net_device *br, - int bridge_num) + struct dsa_bridge bridge) { - u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); + u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); dsa_port_tag_8021q_vlan_del(dsa_to_port(ds, port), tx_vid, true); } diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index b3da4b2ea11c..8abf39dcac64 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -132,6 +132,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev, u8 *dsa_header; if (skb->offload_fwd_mark) { + unsigned int bridge_num = dsa_port_bridge_num_get(dp); struct dsa_switch_tree *dst = dp->ds->dst; cmd = DSA_CMD_FORWARD; @@ -140,7 +141,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev, * packets on behalf of a virtual switch device with an index * past the physical switches. */ - tag_dev = dst->last_switch + 1 + dp->bridge_num; + tag_dev = dst->last_switch + bridge_num; tag_port = 0; } else { cmd = DSA_CMD_FROM_CPU; @@ -165,7 +166,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev, dsa_header[2] &= ~0x10; } } else { - struct net_device *br = dp->bridge_dev; + struct net_device *br = dsa_port_bridge_dev_get(dp); u16 vid; vid = br ? 
MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE; diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index 4ed74d509d6a..0d81f172b7a6 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -12,7 +12,7 @@ static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp, u64 *vlan_tci, u64 *tag_type) { - struct net_device *br = READ_ONCE(dp->bridge_dev); + struct net_device *br = dsa_port_bridge_dev_get(dp); struct vlan_ethhdr *hdr; u16 proto, tci; diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c index a1919ea5e828..68982b2789a5 100644 --- a/net/dsa/tag_ocelot_8021q.c +++ b/net/dsa/tag_ocelot_8021q.c @@ -12,25 +12,39 @@ #include <linux/dsa/ocelot.h> #include "dsa_priv.h" +struct ocelot_8021q_tagger_private { + struct ocelot_8021q_tagger_data data; /* Must be first */ + struct kthread_worker *xmit_worker; +}; + static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp, struct sk_buff *skb) { + struct ocelot_8021q_tagger_private *priv = dp->ds->tagger_data; + struct ocelot_8021q_tagger_data *data = &priv->data; + void (*xmit_work_fn)(struct kthread_work *work); struct felix_deferred_xmit_work *xmit_work; - struct felix_port *felix_port = dp->priv; + struct kthread_worker *xmit_worker; + + xmit_work_fn = data->xmit_work_fn; + xmit_worker = priv->xmit_worker; + + if (!xmit_work_fn || !xmit_worker) + return NULL; xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC); if (!xmit_work) return NULL; /* Calls felix_port_deferred_xmit in felix.c */ - kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn); + kthread_init_work(&xmit_work->work, xmit_work_fn); /* Increase refcount so the kfree_skb in dsa_slave_xmit * won't really free the packet. */ xmit_work->dp = dp; xmit_work->skb = skb_get(skb); - kthread_queue_work(felix_port->xmit_worker, &xmit_work->work); + kthread_queue_work(xmit_worker, &xmit_work->work); return NULL; } @@ -67,11 +81,43 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb, return skb; } +static void ocelot_disconnect(struct dsa_switch *ds) +{ + struct ocelot_8021q_tagger_private *priv = ds->tagger_data; + + kthread_destroy_worker(priv->xmit_worker); + kfree(priv); + ds->tagger_data = NULL; +} + +static int ocelot_connect(struct dsa_switch *ds) +{ + struct ocelot_8021q_tagger_private *priv; + int err; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->xmit_worker = kthread_create_worker(0, "felix_xmit"); + if (IS_ERR(priv->xmit_worker)) { + err = PTR_ERR(priv->xmit_worker); + kfree(priv); + return err; + } + + ds->tagger_data = priv; + + return 0; +} + static const struct dsa_device_ops ocelot_8021q_netdev_ops = { .name = "ocelot-8021q", .proto = DSA_TAG_PROTO_OCELOT_8021Q, .xmit = ocelot_xmit, .rcv = ocelot_rcv, + .connect = ocelot_connect, + .disconnect = ocelot_disconnect, .needed_headroom = VLAN_HLEN, .promisc_on_master = true, }; diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 262c8833a910..72d5e0ef8dcf 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -4,7 +4,6 @@ #include <linux/if_vlan.h> #include <linux/dsa/sja1105.h> #include <linux/dsa/8021q.h> -#include <linux/skbuff.h> #include <linux/packing.h> #include "dsa_priv.h" @@ -54,11 +53,25 @@ #define SJA1110_TX_TRAILER_LEN 4 #define SJA1110_MAX_PADDING_LEN 15 -enum sja1110_meta_tstamp { - SJA1110_META_TSTAMP_TX = 0, - SJA1110_META_TSTAMP_RX = 1, +#define SJA1105_HWTS_RX_EN 0 + +struct sja1105_tagger_private { + struct sja1105_tagger_data data; /* Must be first */ + unsigned long state; + /* 
Protects concurrent access to the meta state machine + * from taggers running on multiple ports on SMP systems + */ + spinlock_t meta_lock; + struct sk_buff *stampable_skb; + struct kthread_worker *xmit_worker; }; +static struct sja1105_tagger_private * +sja1105_tagger_private(struct dsa_switch *ds) +{ + return ds->tagger_data; +} + /* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */ static inline bool sja1105_is_link_local(const struct sk_buff *skb) { @@ -125,16 +138,30 @@ static inline bool sja1105_is_meta_frame(const struct sk_buff *skb) static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp, struct sk_buff *skb) { - struct sja1105_port *sp = dp->priv; + struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds); + struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds); + void (*xmit_work_fn)(struct kthread_work *work); + struct sja1105_deferred_xmit_work *xmit_work; + struct kthread_worker *xmit_worker; - if (!dsa_port_is_sja1105(dp)) - return skb; + xmit_work_fn = tagger_data->xmit_work_fn; + xmit_worker = priv->xmit_worker; + + if (!xmit_work_fn || !xmit_worker) + return NULL; + + xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC); + if (!xmit_work) + return NULL; + kthread_init_work(&xmit_work->work, xmit_work_fn); /* Increase refcount so the kfree_skb in dsa_slave_xmit * won't really free the packet. */ - skb_queue_tail(&sp->xmit_queue, skb_get(skb)); - kthread_queue_work(sp->xmit_worker, &sp->xmit_work); + xmit_work->dp = dp; + xmit_work->skb = skb_get(skb); + + kthread_queue_work(xmit_worker, &xmit_work->work); return NULL; } @@ -159,14 +186,16 @@ static u16 sja1105_xmit_tpid(struct dsa_port *dp) * need to find it. */ dsa_switch_for_each_port(other_dp, ds) { - if (!other_dp->bridge_dev) + struct net_device *br = dsa_port_bridge_dev_get(other_dp); + + if (!br) continue; /* Error is returned only if CONFIG_BRIDGE_VLAN_FILTERING, * which seems pointless to handle, as our port cannot become * VLAN-aware in that case. */ - br_vlan_get_proto(other_dp->bridge_dev, &proto); + br_vlan_get_proto(br, &proto); return proto; } @@ -180,7 +209,8 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb, struct net_device *netdev) { struct dsa_port *dp = dsa_slave_to_port(netdev); - struct net_device *br = dp->bridge_dev; + unsigned int bridge_num = dsa_port_bridge_num_get(dp); + struct net_device *br = dsa_port_bridge_dev_get(dp); u16 tx_vid; /* If the port is under a VLAN-aware bridge, just slide the @@ -196,7 +226,7 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb, * TX VLAN that targets the bridge's entire broadcast domain, * instead of just the specific port. */ - tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(dp->bridge_num); + tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid); } @@ -352,32 +382,32 @@ static struct sk_buff */ if (is_link_local) { struct dsa_port *dp = dsa_slave_to_port(skb->dev); - struct sja1105_port *sp = dp->priv; + struct sja1105_tagger_private *priv; + struct dsa_switch *ds = dp->ds; - if (unlikely(!dsa_port_is_sja1105(dp))) - return skb; + priv = sja1105_tagger_private(ds); - if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state)) + if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state)) /* Do normal processing. */ return skb; - spin_lock(&sp->data->meta_lock); + spin_lock(&priv->meta_lock); /* Was this a link-local frame instead of the meta * that we were expecting? 
*/ - if (sp->data->stampable_skb) { - dev_err_ratelimited(dp->ds->dev, + if (priv->stampable_skb) { + dev_err_ratelimited(ds->dev, "Expected meta frame, is %12llx " "in the DSA master multicast filter?\n", SJA1105_META_DMAC); - kfree_skb(sp->data->stampable_skb); + kfree_skb(priv->stampable_skb); } /* Hold a reference to avoid dsa_switch_rcv * from freeing the skb. */ - sp->data->stampable_skb = skb_get(skb); - spin_unlock(&sp->data->meta_lock); + priv->stampable_skb = skb_get(skb); + spin_unlock(&priv->meta_lock); /* Tell DSA we got nothing */ return NULL; @@ -390,37 +420,37 @@ static struct sk_buff */ } else if (is_meta) { struct dsa_port *dp = dsa_slave_to_port(skb->dev); - struct sja1105_port *sp = dp->priv; + struct sja1105_tagger_private *priv; + struct dsa_switch *ds = dp->ds; struct sk_buff *stampable_skb; - if (unlikely(!dsa_port_is_sja1105(dp))) - return skb; + priv = sja1105_tagger_private(ds); /* Drop the meta frame if we're not in the right state * to process it. */ - if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state)) + if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state)) return NULL; - spin_lock(&sp->data->meta_lock); + spin_lock(&priv->meta_lock); - stampable_skb = sp->data->stampable_skb; - sp->data->stampable_skb = NULL; + stampable_skb = priv->stampable_skb; + priv->stampable_skb = NULL; /* Was this a meta frame instead of the link-local * that we were expecting? */ if (!stampable_skb) { - dev_err_ratelimited(dp->ds->dev, + dev_err_ratelimited(ds->dev, "Unexpected meta frame\n"); - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->meta_lock); return NULL; } if (stampable_skb->dev != skb->dev) { - dev_err_ratelimited(dp->ds->dev, + dev_err_ratelimited(ds->dev, "Meta frame on wrong port\n"); - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->meta_lock); return NULL; } @@ -431,12 +461,36 @@ static struct sk_buff skb = stampable_skb; sja1105_transfer_meta(skb, meta); - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->meta_lock); } return skb; } +static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds) +{ + struct sja1105_tagger_private *priv = sja1105_tagger_private(ds); + + return test_bit(SJA1105_HWTS_RX_EN, &priv->state); +} + +static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on) +{ + struct sja1105_tagger_private *priv = sja1105_tagger_private(ds); + + if (on) + set_bit(SJA1105_HWTS_RX_EN, &priv->state); + else + clear_bit(SJA1105_HWTS_RX_EN, &priv->state); + + /* Initialize the meta state machine to a known state */ + if (!priv->stampable_skb) + return; + + kfree_skb(priv->stampable_skb); + priv->stampable_skb = NULL; +} + static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb) { u16 tpid = ntohs(eth_hdr(skb)->h_proto); @@ -523,48 +577,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, is_meta); } -static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, - u8 ts_id, enum sja1110_meta_tstamp dir, - u64 tstamp) -{ - struct sk_buff *skb, *skb_tmp, *skb_match = NULL; - struct dsa_port *dp = dsa_to_port(ds, port); - struct skb_shared_hwtstamps shwt = {0}; - struct sja1105_port *sp = dp->priv; - - if (!dsa_port_is_sja1105(dp)) - return; - - /* We don't care about RX timestamps on the CPU port */ - if (dir == SJA1110_META_TSTAMP_RX) - return; - - spin_lock(&sp->data->skb_txtstamp_queue.lock); - - skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) { - if (SJA1105_SKB_CB(skb)->ts_id != ts_id) - continue; - - __skb_unlink(skb, &sp->data->skb_txtstamp_queue); - skb_match = skb; - - break; - } 
- - spin_unlock(&sp->data->skb_txtstamp_queue.lock); - - if (WARN_ON(!skb_match)) - return; - - shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp)); - skb_complete_tx_timestamp(skb_match, &shwt); -} - static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header) { u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN; int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header); int n_ts = SJA1110_RX_HEADER_N_TS(rx_header); + struct sja1105_tagger_data *tagger_data; struct net_device *master = skb->dev; struct dsa_port *cpu_dp; struct dsa_switch *ds; @@ -578,6 +596,10 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header) return NULL; } + tagger_data = sja1105_tagger_data(ds); + if (!tagger_data->meta_tstamp_handler) + return NULL; + for (i = 0; i <= n_ts; i++) { u8 ts_id, source_port, dir; u64 tstamp; @@ -587,8 +609,8 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header) dir = (buf[1] & BIT(3)) >> 3; tstamp = be64_to_cpu(*(__be64 *)(buf + 2)); - sja1110_process_meta_tstamp(ds, source_port, ts_id, dir, - tstamp); + tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir, + tstamp); buf += SJA1110_META_TSTAMP_SIZE; } @@ -719,11 +741,53 @@ static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto, *proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1]; } +static void sja1105_disconnect(struct dsa_switch *ds) +{ + struct sja1105_tagger_private *priv = ds->tagger_data; + + kthread_destroy_worker(priv->xmit_worker); + kfree(priv); + ds->tagger_data = NULL; +} + +static int sja1105_connect(struct dsa_switch *ds) +{ + struct sja1105_tagger_data *tagger_data; + struct sja1105_tagger_private *priv; + struct kthread_worker *xmit_worker; + int err; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->meta_lock); + + xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit", + ds->dst->index, ds->index); + if (IS_ERR(xmit_worker)) { + err = PTR_ERR(xmit_worker); + kfree(priv); + return err; + } + + priv->xmit_worker = xmit_worker; + /* Export functions for switch driver use */ + tagger_data = &priv->data; + tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state; + tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state; + ds->tagger_data = priv; + + return 0; +} + static const struct dsa_device_ops sja1105_netdev_ops = { .name = "sja1105", .proto = DSA_TAG_PROTO_SJA1105, .xmit = sja1105_xmit, .rcv = sja1105_rcv, + .connect = sja1105_connect, + .disconnect = sja1105_disconnect, .needed_headroom = VLAN_HLEN, .flow_dissect = sja1105_flow_dissect, .promisc_on_master = true, @@ -737,6 +801,8 @@ static const struct dsa_device_ops sja1110_netdev_ops = { .proto = DSA_TAG_PROTO_SJA1110, .xmit = sja1110_xmit, .rcv = sja1110_rcv, + .connect = sja1105_connect, + .disconnect = sja1105_disconnect, .flow_dissect = sja1110_flow_dissect, .needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN, .needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN, diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index c7d9e08107cb..ebcc812735a4 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -436,11 +436,10 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb) type = eh->h_proto; - rcu_read_lock(); ptype = gro_find_receive_by_type(type); if (ptype == NULL) { flush = 1; - goto out_unlock; + goto out; } skb_gro_pull(skb, sizeof(*eh)); @@ -450,8 +449,6 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb) 
ipv6_gro_receive, inet_gro_receive, head, skb); -out_unlock: - rcu_read_unlock(); out: skb_gro_flush_final(skb, pp, flush); @@ -469,14 +466,12 @@ int eth_gro_complete(struct sk_buff *skb, int nhoff) if (skb->encapsulation) skb_set_inner_mac_header(skb, nhoff); - rcu_read_lock(); ptype = gro_find_complete_by_type(type); if (ptype != NULL) err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, ipv6_gro_complete, inet_gro_complete, skb, nhoff + sizeof(*eh)); - rcu_read_unlock(); return err; } EXPORT_SYMBOL(eth_gro_complete); diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c index 63560bbb7d1f..920aac02fe39 100644 --- a/net/ethtool/cabletest.c +++ b/net/ethtool/cabletest.c @@ -96,7 +96,7 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info) out_rtnl: rtnl_unlock(); out_dev_put: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } @@ -353,7 +353,7 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info) out_rtnl: rtnl_unlock(); out_dev_put: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c index 6a070dc8e4b0..403158862011 100644 --- a/net/ethtool/channels.c +++ b/net/ethtool/channels.c @@ -219,6 +219,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c index 46776ea42a92..487bdf345541 100644 --- a/net/ethtool/coalesce.c +++ b/net/ethtool/coalesce.c @@ -336,6 +336,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/common.c b/net/ethtool/common.c index c63e0739dc6a..0c5210015911 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -89,6 +89,7 @@ tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = { [ETHTOOL_RX_COPYBREAK] = "rx-copybreak", [ETHTOOL_TX_COPYBREAK] = "tx-copybreak", [ETHTOOL_PFC_PREVENTION_TOUT] = "pfc-prevention-tout", + [ETHTOOL_TX_COPYBREAK_BUF_SIZE] = "tx-copybreak-buf-size", }; const char diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c index f99912d7957e..d73888c7d19c 100644 --- a/net/ethtool/debug.c +++ b/net/ethtool/debug.c @@ -123,6 +123,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c index e10bfcc07853..45c42b2d5f17 100644 --- a/net/ethtool/eee.c +++ b/net/ethtool/eee.c @@ -185,6 +185,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/features.c b/net/ethtool/features.c index 1c9f4df273bd..55d449a2d3fc 100644 --- a/net/ethtool/features.c +++ b/net/ethtool/features.c @@ -136,7 +136,6 @@ static void ethnl_features_to_bitmap(unsigned long *dest, netdev_features_t val) const unsigned int words = BITS_TO_LONGS(NETDEV_FEATURE_COUNT); unsigned int i; - bitmap_zero(dest, NETDEV_FEATURE_COUNT); for (i = 0; i < words; i++) dest[i] = (unsigned long)(val >> (i * BITS_PER_LONG)); } @@ -284,6 +283,6 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info) out_rtnl: rtnl_unlock(); - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/fec.c b/net/ethtool/fec.c index 8738dafd5417..9f5a134e2e01 100644 --- a/net/ethtool/fec.c +++ b/net/ethtool/fec.c @@ -305,6 +305,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + 
ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 20bcf86970ff..9a113d893521 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -734,6 +734,9 @@ ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp) sizeof(rsp->info.bus_info)); strlcpy(rsp->info.driver, dev->dev.parent->driver->name, sizeof(rsp->info.driver)); + } else if (dev->rtnl_link_ops) { + strlcpy(rsp->info.driver, dev->rtnl_link_ops->kind, + sizeof(rsp->info.driver)); } else { return -EOPNOTSUPP; } @@ -1743,11 +1746,13 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) { struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM }; + struct kernel_ethtool_ringparam kernel_ringparam = {}; if (!dev->ethtool_ops->get_ringparam) return -EOPNOTSUPP; - dev->ethtool_ops->get_ringparam(dev, &ringparam); + dev->ethtool_ops->get_ringparam(dev, &ringparam, + &kernel_ringparam, NULL); if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) return -EFAULT; @@ -1757,6 +1762,7 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) { struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM }; + struct kernel_ethtool_ringparam kernel_ringparam; int ret; if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam) @@ -1765,7 +1771,7 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) return -EFAULT; - dev->ethtool_ops->get_ringparam(dev, &max); + dev->ethtool_ops->get_ringparam(dev, &max, &kernel_ringparam, NULL); /* ensure new ring parameters are within the maximums */ if (ringparam.rx_pending > max.rx_max_pending || @@ -1774,7 +1780,8 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) ringparam.tx_pending > max.tx_max_pending) return -EINVAL; - ret = dev->ethtool_ops->set_ringparam(dev, &ringparam); + ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, + &kernel_ringparam, NULL); if (!ret) ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL); return ret; @@ -1982,6 +1989,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) struct ethtool_value id; static bool busy; const struct ethtool_ops *ops = dev->ethtool_ops; + netdevice_tracker dev_tracker; int rc; if (!ops->set_phys_id) @@ -2001,7 +2009,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) * removal of the device. 
*/ busy = true; - dev_hold(dev); + dev_hold_track(dev, &dev_tracker, GFP_KERNEL); rtnl_unlock(); if (rc == 0) { @@ -2025,7 +2033,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) } rtnl_lock(); - dev_put(dev); + dev_put_track(dev, &dev_tracker); busy = false; (void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE); @@ -2396,6 +2404,7 @@ static int ethtool_tunable_valid(const struct ethtool_tunable *tuna) switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: case ETHTOOL_TX_COPYBREAK: + case ETHTOOL_TX_COPYBREAK_BUF_SIZE: if (tuna->len != sizeof(u32) || tuna->type_id != ETHTOOL_TUNABLE_U32) return -EINVAL; diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c index b91839870efc..efa0f7f48836 100644 --- a/net/ethtool/linkinfo.c +++ b/net/ethtool/linkinfo.c @@ -149,6 +149,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c index f9eda596f301..99b29b4fe947 100644 --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c @@ -358,6 +358,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/module.c b/net/ethtool/module.c index bc2cef11bbda..898ed436b9e4 100644 --- a/net/ethtool/module.c +++ b/net/ethtool/module.c @@ -175,6 +175,6 @@ out_ops: ethnl_ops_complete(dev); out_rtnl: rtnl_unlock(); - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 96f4180aabd2..f09c62302a9a 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -142,6 +142,8 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, } req_info->dev = dev; + if (dev) + netdev_tracker_alloc(dev, &req_info->dev_tracker, GFP_KERNEL); req_info->flags = flags; return 0; } @@ -400,7 +402,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info) ops->cleanup_data(reply_data); genlmsg_end(rskb, reply_payload); - dev_put(req_info->dev); + dev_put_track(req_info->dev, &req_info->dev_tracker); kfree(reply_data); kfree(req_info); return genlmsg_reply(rskb, info); @@ -412,7 +414,7 @@ err_cleanup: if (ops->cleanup_data) ops->cleanup_data(reply_data); err_dev: - dev_put(req_info->dev); + dev_put_track(req_info->dev, &req_info->dev_tracker); kfree(reply_data); kfree(req_info); return ret; @@ -548,7 +550,7 @@ static int ethnl_default_start(struct netlink_callback *cb) * same parser as for non-dump (doit) requests is used, it * would take reference to the device if it finds one */ - dev_put(req_info->dev); + dev_put_track(req_info->dev, &req_info->dev_tracker); req_info->dev = NULL; } if (ret < 0) @@ -625,6 +627,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd, } req_info->dev = dev; + netdev_tracker_alloc(dev, &req_info->dev_tracker, GFP_KERNEL); req_info->flags |= ETHTOOL_FLAG_COMPACT_BITSETS; ethnl_init_reply_data(reply_data, ops, dev); diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 836ee7157848..75856db299e9 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -222,6 +222,7 @@ static inline unsigned int ethnl_reply_header_size(void) /** * struct ethnl_req_info - base type of request information for GET requests * @dev: network device the request is for (may be null) + * @dev_tracker: refcount tracker for @dev reference * @flags: request flags common for all request types * * This is a common base for request specific 
structures holding data from @@ -230,9 +231,15 @@ static inline unsigned int ethnl_reply_header_size(void) */ struct ethnl_req_info { struct net_device *dev; + netdevice_tracker dev_tracker; u32 flags; }; +static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info) +{ + dev_put_track(req_info->dev, &req_info->dev_tracker); +} + /** * struct ethnl_reply_data - base type of reply data for GET requests * @dev: device for current reply message; in single shot requests it is @@ -356,7 +363,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1]; extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1]; extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1]; -extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX + 1]; +extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_BUF_LEN + 1]; extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1]; extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1]; extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1]; diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c index ee1e5806bc93..a8c113d244db 100644 --- a/net/ethtool/pause.c +++ b/net/ethtool/pause.c @@ -181,6 +181,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/privflags.c b/net/ethtool/privflags.c index fc9f3be23a19..4c7bfa81e4ab 100644 --- a/net/ethtool/privflags.c +++ b/net/ethtool/privflags.c @@ -196,6 +196,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index 4e097812a967..c1d5f5e0fdc9 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -10,6 +10,7 @@ struct rings_req_info { struct rings_reply_data { struct ethnl_reply_data base; struct ethtool_ringparam ringparam; + struct kernel_ethtool_ringparam kernel_ringparam; }; #define RINGS_REPDATA(__reply_base) \ @@ -25,6 +26,7 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base, struct genl_info *info) { struct rings_reply_data *data = RINGS_REPDATA(reply_base); + struct netlink_ext_ack *extack = info ? 
info->extack : NULL; struct net_device *dev = reply_base->dev; int ret; @@ -33,7 +35,8 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base, ret = ethnl_ops_begin(dev); if (ret < 0) return ret; - dev->ethtool_ops->get_ringparam(dev, &data->ringparam); + dev->ethtool_ops->get_ringparam(dev, &data->ringparam, + &data->kernel_ringparam, extack); ethnl_ops_complete(dev); return 0; @@ -49,7 +52,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base, nla_total_size(sizeof(u32)) + /* _RINGS_RX */ nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */ nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */ - nla_total_size(sizeof(u32)); /* _RINGS_TX */ + nla_total_size(sizeof(u32)) + /* _RINGS_TX */ + nla_total_size(sizeof(u32)); /* _RINGS_RX_BUF_LEN */ } static int rings_fill_reply(struct sk_buff *skb, @@ -57,6 +61,7 @@ static int rings_fill_reply(struct sk_buff *skb, const struct ethnl_reply_data *reply_base) { const struct rings_reply_data *data = RINGS_REPDATA(reply_base); + const struct kernel_ethtool_ringparam *kernel_ringparam = &data->kernel_ringparam; const struct ethtool_ringparam *ringparam = &data->ringparam; if ((ringparam->rx_max_pending && @@ -78,7 +83,10 @@ static int rings_fill_reply(struct sk_buff *skb, (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX, ringparam->tx_max_pending) || nla_put_u32(skb, ETHTOOL_A_RINGS_TX, - ringparam->tx_pending)))) + ringparam->tx_pending))) || + (kernel_ringparam->rx_buf_len && + (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, + kernel_ringparam->rx_buf_len)))) return -EMSGSIZE; return 0; @@ -105,10 +113,12 @@ const struct nla_policy ethnl_rings_set_policy[] = { [ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_U32 }, [ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 }, [ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 }, + [ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1), }; int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) { + struct kernel_ethtool_ringparam kernel_ringparam = {}; struct ethtool_ringparam ringparam = {}; struct ethnl_req_info req_info = {}; struct nlattr **tb = info->attrs; @@ -134,7 +144,7 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) ret = ethnl_ops_begin(dev); if (ret < 0) goto out_rtnl; - ops->get_ringparam(dev, &ringparam); + ops->get_ringparam(dev, &ringparam, &kernel_ringparam, info->extack); ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod); ethnl_update_u32(&ringparam.rx_mini_pending, @@ -142,6 +152,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) ethnl_update_u32(&ringparam.rx_jumbo_pending, tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod); ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod); + ethnl_update_u32(&kernel_ringparam.rx_buf_len, + tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod); ret = 0; if (!mod) goto out_ops; @@ -164,7 +176,17 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) goto out_ops; } - ret = dev->ethtool_ops->set_ringparam(dev, &ringparam); + if (kernel_ringparam.rx_buf_len != 0 && + !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) { + ret = -EOPNOTSUPP; + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_RX_BUF_LEN], + "setting rx buf len not supported"); + goto out_ops; + } + + ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, + &kernel_ringparam, info->extack); if (ret < 0) goto out_ops; ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL); @@ -174,6 +196,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git 
a/net/ethtool/stats.c b/net/ethtool/stats.c index ec07f5765e03..a20e0a24ff61 100644 --- a/net/ethtool/stats.c +++ b/net/ethtool/stats.c @@ -14,10 +14,12 @@ struct stats_req_info { struct stats_reply_data { struct ethnl_reply_data base; - struct ethtool_eth_phy_stats phy_stats; - struct ethtool_eth_mac_stats mac_stats; - struct ethtool_eth_ctrl_stats ctrl_stats; - struct ethtool_rmon_stats rmon_stats; + struct_group(stats, + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + ); const struct ethtool_rmon_hist_range *rmon_ranges; }; @@ -117,10 +119,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base, /* Mark all stats as unset (see ETHTOOL_STAT_NOT_SET) to prevent them * from being reported to user space in case driver did not set them. */ - memset(&data->phy_stats, 0xff, sizeof(data->phy_stats)); - memset(&data->mac_stats, 0xff, sizeof(data->mac_stats)); - memset(&data->ctrl_stats, 0xff, sizeof(data->ctrl_stats)); - memset(&data->rmon_stats, 0xff, sizeof(data->rmon_stats)); + memset(&data->stats, 0xff, sizeof(data->stats)); if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) && dev->ethtool_ops->get_eth_phy_stats) diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c index e7f2ee0d2471..efde33536687 100644 --- a/net/ethtool/tunnels.c +++ b/net/ethtool/tunnels.c @@ -195,7 +195,7 @@ int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info) if (ret) goto err_free_msg; rtnl_unlock(); - dev_put(req_info.dev); + ethnl_parse_header_dev_put(&req_info); genlmsg_end(rskb, reply_payload); return genlmsg_reply(rskb, info); @@ -204,7 +204,7 @@ err_free_msg: nlmsg_free(rskb); err_unlock_rtnl: rtnl_unlock(); - dev_put(req_info.dev); + ethnl_parse_header_dev_put(&req_info); return ret; } @@ -230,7 +230,7 @@ int ethnl_tunnel_info_start(struct netlink_callback *cb) sock_net(cb->skb->sk), cb->extack, false); if (ctx->req_info.dev) { - dev_put(ctx->req_info.dev); + ethnl_parse_header_dev_put(&ctx->req_info); ctx->req_info.dev = NULL; } diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c index ada7df2331d2..88f435e76481 100644 --- a/net/ethtool/wol.c +++ b/net/ethtool/wol.c @@ -165,6 +165,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 737e4f17e1c6..e57fdad9ef94 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -30,13 +30,13 @@ static bool is_slave_up(struct net_device *dev) static void __hsr_set_operstate(struct net_device *dev, int transition) { - write_lock_bh(&dev_base_lock); + write_lock(&dev_base_lock); if (dev->operstate != transition) { dev->operstate = transition; - write_unlock_bh(&dev_base_lock); + write_unlock(&dev_base_lock); netdev_state_change(dev); } else { - write_unlock_bh(&dev_base_lock); + write_unlock(&dev_base_lock); } } diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 7bb9ef35c570..3b2366a88c3c 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -174,8 +174,8 @@ static int raw_hash(struct sock *sk) { write_lock_bh(&raw_lock); sk_add_node(sk, &raw_head); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&raw_lock); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return 0; } @@ -453,8 +453,8 @@ static int dgram_hash(struct sock *sk) { write_lock_bh(&dgram_lock); sk_add_node(sk, &dgram_head); - sock_prot_inuse_add(sock_net(sk), 
sk->sk_prot, 1); write_unlock_bh(&dgram_lock); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return 0; } diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 6b5956500436..5d18d32557d2 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -99,6 +99,7 @@ #include <net/route.h> #include <net/ip_fib.h> #include <net/inet_connection_sock.h> +#include <net/gro.h> #include <net/tcp.h> #include <net/udp.h> #include <net/udplite.h> @@ -224,7 +225,7 @@ int inet_listen(struct socket *sock, int backlog) tcp_fastopen_init_key_once(sock_net(sk)); } - err = inet_csk_listen_start(sk, backlog); + err = inet_csk_listen_start(sk); if (err) goto out; tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL); @@ -488,11 +489,8 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, * is temporarily down) */ err = -EADDRNOTAVAIL; - if (!inet_can_nonlocal_bind(net, inet) && - addr->sin_addr.s_addr != htonl(INADDR_ANY) && - chk_addr_ret != RTN_LOCAL && - chk_addr_ret != RTN_MULTICAST && - chk_addr_ret != RTN_BROADCAST) + if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr, + chk_addr_ret)) goto out; snum = ntohs(addr->sin_port); @@ -1454,19 +1452,18 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) proto = iph->protocol; - rcu_read_lock(); ops = rcu_dereference(inet_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) - goto out_unlock; + goto out; if (*(u8 *)iph != 0x45) - goto out_unlock; + goto out; if (ip_is_fragment(iph)) - goto out_unlock; + goto out; if (unlikely(ip_fast_csum((u8 *)iph, 5))) - goto out_unlock; + goto out; id = ntohl(*(__be32 *)&iph->id); flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF)); @@ -1543,9 +1540,6 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive, ops->callbacks.gro_receive, head, skb); -out_unlock: - rcu_read_unlock(); - out: skb_gro_flush_final(skb, pp, flush); @@ -1618,10 +1612,9 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff) csum_replace2(&iph->check, iph->tot_len, newlen); iph->tot_len = newlen; - rcu_read_lock(); ops = rcu_dereference(inet_offloads[proto]); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) - goto out_unlock; + goto out; /* Only need to add sizeof(*iph) to get to the next hdr below * because any hdr with option will have been flushed in @@ -1631,9 +1624,7 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff) tcp4_gro_complete, udp4_gro_complete, skb, nhoff + sizeof(*iph)); -out_unlock: - rcu_read_unlock(); - +out: return err; } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 857a144b1ea9..4db0325f6e1a 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1299,21 +1299,6 @@ static struct packet_type arp_packet_type __read_mostly = { .func = arp_rcv, }; -static int arp_proc_init(void); - -void __init arp_init(void) -{ - neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl); - - dev_add_pack(&arp_packet_type); - arp_proc_init(); -#ifdef CONFIG_SYSCTL - neigh_sysctl_register(NULL, &arp_tbl.parms, NULL); -#endif - register_netdevice_notifier(&arp_netdev_notifier); -} - -#ifdef CONFIG_PROC_FS #if IS_ENABLED(CONFIG_AX25) /* ------------------------------------------------------------------------ */ @@ -1451,16 +1436,14 @@ static struct pernet_operations arp_net_ops = { .exit = arp_net_exit, }; -static int __init arp_proc_init(void) +void __init arp_init(void) { - return register_pernet_subsys(&arp_net_ops); -} - -#else /* CONFIG_PROC_FS */ + 
neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl); -static int __init arp_proc_init(void) -{ - return 0; + dev_add_pack(&arp_packet_type); + register_pernet_subsys(&arp_net_ops); +#ifdef CONFIG_SYSCTL + neigh_sysctl_register(NULL, &arp_tbl.parms, NULL); +#endif + register_netdevice_notifier(&arp_netdev_notifier); } - -#endif /* CONFIG_PROC_FS */ diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 4bb9401b0a3f..de610cb83694 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -169,7 +169,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog) t = bpf_tcp_congestion_ops.type; m = &btf_type_member(t)[midx]; - return btf_member_bit_offset(t, m) / 8; + return __btf_member_bit_offset(t, m) / 8; } static const struct bpf_func_proto * @@ -246,7 +246,7 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t, utcp_ca = (const struct tcp_congestion_ops *)udata; tcp_ca = (struct tcp_congestion_ops *)kdata; - moff = btf_member_bit_offset(t, member) / 8; + moff = __btf_member_bit_offset(t, member) / 8; switch (moff) { case offsetof(struct tcp_congestion_ops, flags): if (utcp_ca->flags & ~TCP_CONG_MASK) @@ -276,7 +276,7 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t, static int bpf_tcp_ca_check_member(const struct btf_type *t, const struct btf_member *member) { - if (is_unsupported(btf_member_bit_offset(t, member) / 8)) + if (is_unsupported(__btf_member_bit_offset(t, member) / 8)) return -ENOTSUPP; return 0; } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 323e622ff9b7..fba2bffd65f7 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -243,7 +243,7 @@ void in_dev_finish_destroy(struct in_device *idev) #ifdef NET_REFCNT_DEBUG pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL"); #endif - dev_put(dev); + dev_put_track(dev, &idev->dev_tracker); if (!idev->dead) pr_err("Freeing alive in_device %p\n", idev); else @@ -271,7 +271,7 @@ static struct in_device *inetdev_init(struct net_device *dev) if (IPV4_DEVCONF(in_dev->cnf, FORWARDING)) dev_disable_lro(dev); /* Reference in_dev->dev */ - dev_hold(dev); + dev_hold_track(dev, &in_dev->dev_tracker, GFP_KERNEL); /* Account for reference dev->ip_ptr (below) */ refcount_set(&in_dev->refcnt, 1); diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 8e4e9aa12130..d87f02a6e934 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -16,6 +16,7 @@ #include <crypto/authenc.h> #include <linux/err.h> #include <linux/module.h> +#include <net/gro.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/esp.h> diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index d279cb8ac158..e0b6c8b6de57 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -216,11 +216,6 @@ static struct fib_table *fib_empty_table(struct net *net) return NULL; } -static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = { - FRA_GENERIC_POLICY, - [FRA_FLOW] = { .type = NLA_U32 }, -}; - static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb, @@ -386,7 +381,6 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = { .nlmsg_payload = fib4_rule_nlmsg_payload, .flush_cache = fib4_rule_flush_cache, .nlgroup = RTNLGRP_IPV4_RULE, - .policy = fib4_rule_policy, .owner = THIS_MODULE, }; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index fde7797b5806..3cad543dc747 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -208,7 +208,7 @@ static void 
rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp) void fib_nh_common_release(struct fib_nh_common *nhc) { - dev_put(nhc->nhc_dev); + dev_put_track(nhc->nhc_dev, &nhc->nhc_dev_tracker); lwtstate_put(nhc->nhc_lwtstate); rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output); rt_fibinfo_free(&nhc->nhc_rth_input); @@ -1006,7 +1006,7 @@ static int fib_check_nh_v6_gw(struct net *net, struct fib_nh *nh, err = ipv6_stub->fib6_nh_init(net, &fib6_nh, &cfg, GFP_KERNEL, extack); if (!err) { nh->fib_nh_dev = fib6_nh.fib_nh_dev; - dev_hold(nh->fib_nh_dev); + dev_hold_track(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_KERNEL); nh->fib_nh_oif = nh->fib_nh_dev->ifindex; nh->fib_nh_scope = RT_SCOPE_LINK; @@ -1090,7 +1090,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, if (!netif_carrier_ok(dev)) nh->fib_nh_flags |= RTNH_F_LINKDOWN; nh->fib_nh_dev = dev; - dev_hold(dev); + dev_hold_track(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC); nh->fib_nh_scope = RT_SCOPE_LINK; return 0; } @@ -1144,7 +1144,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, "No egress device for nexthop gateway"); goto out; } - dev_hold(dev); + dev_hold_track(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC); if (!netif_carrier_ok(dev)) nh->fib_nh_flags |= RTNH_F_LINKDOWN; err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN; @@ -1178,7 +1178,7 @@ static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh, } nh->fib_nh_dev = in_dev->dev; - dev_hold(nh->fib_nh_dev); + dev_hold_track(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC); nh->fib_nh_scope = RT_SCOPE_HOST; if (!netif_carrier_ok(nh->fib_nh_dev)) nh->fib_nh_flags |= RTNH_F_LINKDOWN; @@ -1508,6 +1508,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg, err = -ENODEV; if (!nh->fib_nh_dev) goto failure; + netdev_tracker_alloc(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, + GFP_KERNEL); } else { int linkdown = 0; diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 8fcbc6258ec5..0d085cc8d96c 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <net/genetlink.h> +#include <net/gro.h> #include <net/gue.h> #include <net/fou.h> #include <net/ip.h> @@ -246,17 +247,14 @@ static struct sk_buff *fou_gro_receive(struct sock *sk, /* Flag this frame as already having an outer encap header */ NAPI_GRO_CB(skb)->is_fou = 1; - rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); if (!ops || !ops->callbacks.gro_receive) - goto out_unlock; + goto out; pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); -out_unlock: - rcu_read_unlock(); - +out: return pp; } @@ -268,19 +266,16 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb, const struct net_offload *ops; int err = -ENOSYS; - rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) - goto out_unlock; + goto out; err = ops->callbacks.gro_complete(skb, nhoff); skb_set_inner_mac_header(skb, nhoff); -out_unlock: - rcu_read_unlock(); - +out: return err; } @@ -438,17 +433,14 @@ next_proto: /* Flag this frame as already having an outer encap header */ NAPI_GRO_CB(skb)->is_fou = 1; - rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? 
inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive)) - goto out_unlock; + goto out; pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); flush = 0; -out_unlock: - rcu_read_unlock(); out: skb_gro_flush_final_remcsum(skb, pp, flush, &grc); @@ -485,18 +477,16 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) return err; } - rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) - goto out_unlock; + goto out; err = ops->callbacks.gro_complete(skb, nhoff + guehlen); skb_set_inner_mac_header(skb, nhoff + guehlen); -out_unlock: - rcu_read_unlock(); +out: return err; } diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 1121a9d5fed9..07073fa35205 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -10,6 +10,7 @@ #include <linux/init.h> #include <net/protocol.h> #include <net/gre.h> +#include <net/gro.h> static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) @@ -162,10 +163,9 @@ static struct sk_buff *gre_gro_receive(struct list_head *head, type = greh->protocol; - rcu_read_lock(); ptype = gro_find_receive_by_type(type); if (!ptype) - goto out_unlock; + goto out; grehlen = GRE_HEADER_SECTION; @@ -179,13 +179,13 @@ static struct sk_buff *gre_gro_receive(struct list_head *head, if (skb_gro_header_hard(skb, hlen)) { greh = skb_gro_header_slow(skb, hlen, off); if (unlikely(!greh)) - goto out_unlock; + goto out; } /* Don't bother verifying checksum if we're going to flush anyway. */ if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) { if (skb_gro_checksum_simple_validate(skb)) - goto out_unlock; + goto out; skb_gro_checksum_try_convert(skb, IPPROTO_GRE, null_compute_pseudo); @@ -229,8 +229,6 @@ static struct sk_buff *gre_gro_receive(struct list_head *head, pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); flush = 0; -out_unlock: - rcu_read_unlock(); out: skb_gro_flush_final(skb, pp, flush); @@ -255,13 +253,10 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff) if (greh->flags & GRE_CSUM) grehlen += GRE_HEADER_SECTION; - rcu_read_lock(); ptype = gro_find_complete_by_type(type); if (ptype) err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); - rcu_read_unlock(); - skb_set_inner_mac_header(skb, nhoff + grehlen); return err; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index d2e2b3d18c66..2ad3c7b42d6d 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2558,7 +2558,6 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, msf->imsf_fmode = pmc->sfmode; psl = rtnl_dereference(pmc->sflist); if (!psl) { - len = 0; count = 0; } else { count = psl->sl_count; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 62a67fdc344c..fc2a985f6064 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1035,7 +1035,7 @@ void inet_csk_prepare_forced_close(struct sock *sk) } EXPORT_SYMBOL(inet_csk_prepare_forced_close); -int inet_csk_listen_start(struct sock *sk, int backlog) +int inet_csk_listen_start(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet = inet_sk(sk); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 75737267746f..30ab717ff1b8 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -307,7 +307,7 @@ static inline struct 
sock *inet_lookup_run_bpf(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, __be32 saddr, __be16 sport, - __be32 daddr, u16 hnum) + __be32 daddr, u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -315,8 +315,8 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net, if (hashinfo != &tcp_hashinfo) return NULL; /* only TCP is supported */ - no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -340,7 +340,7 @@ struct sock *__inet_lookup_listener(struct net *net, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { result = inet_lookup_run_bpf(net, hashinfo, skb, doff, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (result) goto done; } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 9bca57ef8b83..57c1d8431386 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -672,7 +672,6 @@ struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state) struct sk_buff *skb2; struct iphdr *iph; - len = state->left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > state->mtu) len = state->mtu; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 38d29b175ca6..445a9ecaefa1 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -576,7 +576,7 @@ out: return err; } -static void __ip_sock_set_tos(struct sock *sk, int val) +void __ip_sock_set_tos(struct sock *sk, int val) { if (sk->sk_type == SOCK_STREAM) { val &= ~INET_ECN_MASK; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 2dda856ca260..07274619b9ea 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -195,10 +195,6 @@ static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) return 1; } -static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { - FRA_GENERIC_POLICY, -}; - static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb, struct netlink_ext_ack *extack) @@ -231,7 +227,6 @@ static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = { .compare = ipmr_rule_compare, .fill = ipmr_rule_fill, .nlgroup = RTNLGRP_IPV4_RULE, - .policy = ipmr_rule_policy, .owner = THIS_MODULE, }; @@ -696,7 +691,7 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify, if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify) unregister_netdevice_queue(dev, head); - dev_put(dev); + dev_put_track(dev, &v->dev_tracker); return 0; } @@ -896,6 +891,7 @@ static int vif_add(struct net *net, struct mr_table *mrt, /* And finish update writing critical data */ write_lock_bh(&mrt_lock); v->dev = dev; + netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC); if (v->flags & VIFF_REGISTER) mrt->mroute_reg_vif_num = vifi; if (vifi+1 > mrt->maxvif) diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 5dbd4b5505eb..1319d093cdda 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -1918,9 +1918,6 @@ static void nh_rt_cache_flush(struct net *net, struct nexthop *nh, if (!replaced_nh->is_group) return; - /* new dsts must use only the new nexthop group */ - synchronize_net(); - nhg = rtnl_dereference(replaced_nh->nh_grp); for (i = 0; i < nhg->num_nh; i++) { struct nh_grp_entry *nhge = &nhg->nh_entries[i]; @@ -2002,9 +1999,10 @@ static int replace_nexthop_grp(struct net *net, 
struct nexthop *old, rcu_assign_pointer(old->nh_grp, newg); + /* Make sure concurrent readers are not using 'oldg' anymore. */ + synchronize_net(); + if (newg->resilient) { - /* Make sure concurrent readers are not using 'oldg' anymore. */ - synchronize_net(); rcu_assign_pointer(oldg->res_table, tmp_table); rcu_assign_pointer(oldg->spare->res_table, tmp_table); } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 1e44a43acfe2..e540b0dcf085 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -311,15 +311,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); - if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) - chk_addr_ret = RTN_LOCAL; - else - chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); - - if ((!inet_can_nonlocal_bind(net, isk) && - chk_addr_ret != RTN_LOCAL) || - chk_addr_ret == RTN_MULTICAST || - chk_addr_ret == RTN_BROADCAST) + chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); + + if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk), + addr->sin_addr.s_addr, + chk_addr_ret)) return -EADDRNOTAVAIL; #if IS_ENABLED(CONFIG_IPV6) diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index bb446e60cf58..a53f256bf9d3 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -99,8 +99,8 @@ int raw_hash_sk(struct sock *sk) write_lock_bh(&h->lock); sk_add_node(sk, head); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&h->lock); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return 0; } @@ -717,6 +717,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; + struct net *net = sock_net(sk); u32 tb_id = RT_TABLE_LOCAL; int ret = -EINVAL; int chk_addr_ret; @@ -725,16 +726,16 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) goto out; if (sk->sk_bound_dev_if) - tb_id = l3mdev_fib_table_by_index(sock_net(sk), - sk->sk_bound_dev_if) ? : tb_id; + tb_id = l3mdev_fib_table_by_index(net, + sk->sk_bound_dev_if) ? 
: tb_id; - chk_addr_ret = inet_addr_type_table(sock_net(sk), addr->sin_addr.s_addr, - tb_id); + chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id); ret = -EADDRNOTAVAIL; - if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && - chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) + if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr, + chk_addr_ret)) goto out; + inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 0b4103b1e622..843a7a3699fe 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -602,7 +602,7 @@ static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash) static u32 fnhe_hashfun(__be32 daddr) { - static siphash_key_t fnhe_hash_key __read_mostly; + static siphash_aligned_key_t fnhe_hash_key; u64 hval; net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key)); @@ -1531,8 +1531,9 @@ void rt_flush_dev(struct net_device *dev) if (rt->dst.dev != dev) continue; rt->dst.dev = blackhole_netdev; - dev_hold(rt->dst.dev); - dev_put(dev); + dev_replace_track(dev, blackhole_netdev, + &rt->dst.dev_tracker, + GFP_ATOMIC); } spin_unlock_bh(&ul->lock); } @@ -2819,7 +2820,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or new->output = dst_discard_out; new->dev = net->loopback_dev; - dev_hold(new->dev); + dev_hold_track(new->dev, &new->dev_tracker, GFP_ATOMIC); rt->rt_is_input = ort->rt_is_input; rt->rt_iif = ort->rt_iif; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 8696dc343ad2..2cb3b852d148 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -14,7 +14,7 @@ #include <net/tcp.h> #include <net/route.h> -static siphash_key_t syncookie_secret[2] __read_mostly; +static siphash_aligned_key_t syncookie_secret[2]; #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2bb28bfd83bf..3b75836db19b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -292,7 +292,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count); long sysctl_tcp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_tcp_mem); -atomic_long_t tcp_memory_allocated; /* Current allocated memory. */ +atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp; /* Current allocated memory. */ EXPORT_SYMBOL(tcp_memory_allocated); #if IS_ENABLED(CONFIG_SMC) @@ -303,7 +303,7 @@ EXPORT_SYMBOL(tcp_have_smc); /* * Current number of TCP sockets. 
*/ -struct percpu_counter tcp_sockets_allocated; +struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp; EXPORT_SYMBOL(tcp_sockets_allocated); /* @@ -456,7 +456,6 @@ void tcp_init_sock(struct sock *sk) WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]); sk_sockets_allocated_inc(sk); - sk->sk_route_forced_caps = NETIF_F_GSO; } EXPORT_SYMBOL(tcp_init_sock); @@ -546,10 +545,11 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) if (state != TCP_SYN_SENT && (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) { int target = sock_rcvlowat(sk, 0, INT_MAX); + u16 urg_data = READ_ONCE(tp->urg_data); - if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) && - !sock_flag(sk, SOCK_URGINLINE) && - tp->urg_data) + if (unlikely(urg_data) && + READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) && + !sock_flag(sk, SOCK_URGINLINE)) target++; if (tcp_stream_is_readable(sk, target)) @@ -574,7 +574,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) } else mask |= EPOLLOUT | EPOLLWRNORM; - if (tp->urg_data & TCP_URG_VALID) + if (urg_data & TCP_URG_VALID) mask |= EPOLLPRI; } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { /* Active TCP fastopen socket with defer_connect @@ -608,7 +608,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) unlock_sock_fast(sk, slow); break; case SIOCATMARK: - answ = tp->urg_data && + answ = READ_ONCE(tp->urg_data) && READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq); break; case SIOCOUTQ: @@ -1466,7 +1466,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) char c = tp->urg_data; if (!(flags & MSG_PEEK)) - tp->urg_data = TCP_URG_READ; + WRITE_ONCE(tp->urg_data, TCP_URG_READ); /* Read urgent data. 
*/ msg->msg_flags |= MSG_OOB; @@ -1580,6 +1580,36 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied) tcp_send_ack(sk); } +void __sk_defer_free_flush(struct sock *sk) +{ + struct llist_node *head; + struct sk_buff *skb, *n; + + head = llist_del_all(&sk->defer_list); + llist_for_each_entry_safe(skb, n, head, ll_node) { + prefetch(n); + skb_mark_not_on_list(skb); + __kfree_skb(skb); + } +} +EXPORT_SYMBOL(__sk_defer_free_flush); + +static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) +{ + __skb_unlink(skb, &sk->sk_receive_queue); + if (likely(skb->destructor == sock_rfree)) { + sock_rfree(skb); + skb->destructor = NULL; + skb->sk = NULL; + if (!skb_queue_empty(&sk->sk_receive_queue) || + !llist_empty(&sk->defer_list)) { + llist_add(&skb->ll_node, &sk->defer_list); + return; + } + } + __kfree_skb(skb); +} + static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) { struct sk_buff *skb; @@ -1599,7 +1629,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) * splitted a fat GRO packet, while we released socket lock * in skb_splice_bits() */ - sk_eat_skb(sk, skb); + tcp_eat_recv_skb(sk, skb); } return NULL; } @@ -1633,7 +1663,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, len = skb->len - offset; /* Stop reading if we hit a patch of urgent data */ - if (tp->urg_data) { + if (unlikely(tp->urg_data)) { u32 urg_offset = tp->urg_seq - seq; if (urg_offset < len) len = urg_offset; @@ -1665,11 +1695,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, continue; } if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { - sk_eat_skb(sk, skb); + tcp_eat_recv_skb(sk, skb); ++seq; break; } - sk_eat_skb(sk, skb); + tcp_eat_recv_skb(sk, skb); if (!desc->count) break; WRITE_ONCE(tp->copied_seq, seq); @@ -2329,7 +2359,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, u32 offset; /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ - if (tp->urg_data && tp->urg_seq == *seq) { + if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { if (copied) break; if (signal_pending(current)) { @@ -2372,10 +2402,10 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, break; if (copied) { - if (sk->sk_err || + if (!timeo || + sk->sk_err || sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN) || - !timeo || signal_pending(current)) break; } else { @@ -2409,13 +2439,12 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, } } - tcp_cleanup_rbuf(sk, copied); - if (copied >= target) { /* Do not sleep, just process backlog. */ - release_sock(sk); - lock_sock(sk); + __sk_flush_backlog(sk); } else { + tcp_cleanup_rbuf(sk, copied); + sk_defer_free_flush(sk); sk_wait_data(sk, &timeo, last); } @@ -2435,7 +2464,7 @@ found_ok_skb: used = len; /* Do we have urgent data here? */ - if (tp->urg_data) { + if (unlikely(tp->urg_data)) { u32 urg_offset = tp->urg_seq - *seq; if (urg_offset < used) { if (!urg_offset) { @@ -2469,8 +2498,8 @@ found_ok_skb: tcp_rcv_space_adjust(sk); skip_copy: - if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { - tp->urg_data = 0; + if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { + WRITE_ONCE(tp->urg_data, 0); tcp_fast_path_check(sk); } @@ -2485,14 +2514,14 @@ skip_copy: if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; if (!(flags & MSG_PEEK)) - sk_eat_skb(sk, skb); + tcp_eat_recv_skb(sk, skb); continue; found_fin_ok: /* Process the FIN. 
*/ WRITE_ONCE(*seq, *seq + 1); if (!(flags & MSG_PEEK)) - sk_eat_skb(sk, skb); + tcp_eat_recv_skb(sk, skb); break; } while (len > 0); @@ -2534,6 +2563,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, ret = tcp_recvmsg_locked(sk, msg, len, nonblock, flags, &tss, &cmsg_flags); release_sock(sk); + sk_defer_free_flush(sk); if (cmsg_flags && ret >= 0) { if (cmsg_flags & TCP_CMSG_TS) @@ -2964,7 +2994,7 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); - tp->urg_data = 0; + WRITE_ONCE(tp->urg_data, 0); tcp_write_queue_purge(sk); tcp_fastopen_active_disable_ofo_check(sk); skb_rbtree_purge(&tp->out_of_order_queue); @@ -3058,7 +3088,7 @@ int tcp_disconnect(struct sock *sk, int flags) sk->sk_frag.page = NULL; sk->sk_frag.offset = 0; } - + sk_defer_free_flush(sk); sk_error_report(sk); return 0; } @@ -3176,7 +3206,7 @@ static void tcp_enable_tx_delay(void) * TCP_CORK can be set together with TCP_NODELAY and it is stronger than * TCP_NODELAY. */ -static void __tcp_sock_set_cork(struct sock *sk, bool on) +void __tcp_sock_set_cork(struct sock *sk, bool on) { struct tcp_sock *tp = tcp_sk(sk); @@ -3204,7 +3234,7 @@ EXPORT_SYMBOL(tcp_sock_set_cork); * However, when TCP_NODELAY is set we make an explicit push, which overrides * even TCP_CORK for currently queued segments. */ -static void __tcp_sock_set_nodelay(struct sock *sk, bool on) +void __tcp_sock_set_nodelay(struct sock *sk, bool on) { if (on) { tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; @@ -3773,10 +3803,12 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) tcp_get_info_chrono_stats(tp, info); info->tcpi_segs_out = tp->segs_out; - info->tcpi_segs_in = tp->segs_in; + + /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ + info->tcpi_segs_in = READ_ONCE(tp->segs_in); + info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); info->tcpi_min_rtt = tcp_min_rtt(tp); - info->tcpi_data_segs_in = tp->data_segs_in; info->tcpi_data_segs_out = tp->data_segs_out; info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0; @@ -4185,6 +4217,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, &zc, &len, err); release_sock(sk); + sk_defer_free_flush(sk); if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) goto zerocopy_rcv_cmsg; switch (len) { diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0ce46849ec3d..8010583f868b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5591,7 +5591,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) } } - tp->urg_data = TCP_URG_NOTYET; + WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET); WRITE_ONCE(tp->urg_seq, ptr); /* Disable header prediction. */ @@ -5604,11 +5604,11 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t struct tcp_sock *tp = tcp_sk(sk); /* Check if we get a new urgent pointer - normally not. */ - if (th->urg) + if (unlikely(th->urg)) tcp_check_urg(sk, th); /* Do we wait for any urgent data? - normally not... 
*/ - if (tp->urg_data == TCP_URG_NOTYET) { + if (unlikely(tp->urg_data == TCP_URG_NOTYET)) { u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - th->syn; @@ -5617,7 +5617,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t u8 tmp; if (skb_copy_bits(skb, ptr, &tmp, 1)) BUG(); - tp->urg_data = TCP_URG_VALID | tmp; + WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 084df223b5df..ac10e4cdd8d0 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1182,7 +1182,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, if (!md5sig) return -ENOMEM; - sk_nocaps_add(sk, NETIF_F_GSO_MASK); + sk_gso_disable(sk); INIT_HLIST_HEAD(&md5sig->head); rcu_assign_pointer(tp->md5sig_info, md5sig); } @@ -1620,7 +1620,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, */ tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags, key->key, key->keylen, GFP_ATOMIC); - sk_nocaps_add(newsk, NETIF_F_GSO_MASK); + sk_gso_disable(newsk); } #endif @@ -1803,8 +1803,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) { - u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf); - u32 tail_gso_size, tail_gso_segs; + u32 limit, tail_gso_size, tail_gso_segs; struct skb_shared_info *shinfo; const struct tcphdr *th; struct tcphdr *thtail; @@ -1912,7 +1911,7 @@ no_coalesce: * to reduce memory overhead, so add a little headroom here. * Few sockets backlog are possibly concurrently non empty. */ - limit += 64*1024; + limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024; if (unlikely(sk_add_backlog(sk, skb, limit))) { bh_unlock_sock(sk); @@ -2106,6 +2105,7 @@ process: sk_incoming_cpu_update(sk); + sk_defer_free_flush(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index fc61cd3fea65..30abde86db45 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -8,6 +8,7 @@ #include <linux/indirect_call_wrapper.h> #include <linux/skbuff.h> +#include <net/gro.h> #include <net/tcp.h> #include <net/protocol.h> diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2e6e5a70168e..5079832af5c1 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1359,7 +1359,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, #ifdef CONFIG_TCP_MD5SIG /* Calculate the MD5 hash, as we have all we need now */ if (md5) { - sk_nocaps_add(sk, NETIF_F_GSO_MASK); + sk_gso_disable(sk); tp->af_specific->calc_md5_hash(opts.hash_location, md5, sk, skb); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 15c6b450b8db..f376c777e8fc 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -122,7 +122,7 @@ EXPORT_SYMBOL(udp_table); long sysctl_udp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_udp_mem); -atomic_long_t udp_memory_allocated; +atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp; EXPORT_SYMBOL(udp_memory_allocated); #define MAX_UDP_PORTS 65536 @@ -459,7 +459,7 @@ static struct sock *udp4_lookup_run_bpf(struct net *net, struct udp_table *udptable, struct sk_buff *skb, __be32 saddr, __be16 sport, - __be32 daddr, u16 hnum) + __be32 daddr, u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -467,8 +467,8 @@ static struct sock *udp4_lookup_run_bpf(struct net *net, if (udptable != &udp_table) return NULL; /* only UDP is 
supported */ - no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -504,7 +504,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { sk = udp4_lookup_run_bpf(net, udptable, skb, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (sk) { result = sk; goto done; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 86d32a1e62ac..6d1a4bec2614 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -7,6 +7,7 @@ */ #include <linux/skbuff.h> +#include <net/gro.h> #include <net/udp.h> #include <net/protocol.h> #include <net/inet_common.h> @@ -424,6 +425,33 @@ out: return segs; } +static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) +{ + if (unlikely(p->len + skb->len >= 65536)) + return -E2BIG; + + if (NAPI_GRO_CB(p)->last == p) + skb_shinfo(p)->frag_list = skb; + else + NAPI_GRO_CB(p)->last->next = skb; + + skb_pull(skb, skb_gro_offset(skb)); + + NAPI_GRO_CB(p)->last = skb; + NAPI_GRO_CB(p)->count++; + p->data_len += skb->len; + + /* sk owenrship - if any - completely transferred to the aggregated packet */ + skb->destructor = NULL; + p->truesize += skb->truesize; + p->len += skb->len; + + NAPI_GRO_CB(skb)->same_flow = 1; + + return 0; +} + + #define UDP_GRO_CNT_MAX 64 static struct sk_buff *udp_gro_receive_segment(struct list_head *head, struct sk_buff *skb) @@ -600,13 +628,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) inet_gro_compute_pseudo); skip: NAPI_GRO_CB(skb)->is_ipv6 = 0; - rcu_read_lock(); if (static_branch_unlikely(&udp_encap_needed_key)) sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest); pp = udp_gro_receive(head, skb, uh, sk); - rcu_read_unlock(); return pp; flush: @@ -641,7 +667,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff, uh->len = newlen; - rcu_read_lock(); sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb, udp4_lib_lookup_skb, skb, uh->source, uh->dest); if (sk && udp_sk(sk)->gro_complete) { @@ -662,7 +687,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff, } else { err = udp_gro_complete_segment(skb); } - rcu_read_unlock(); if (skb->remcsum_offload) skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM; diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 9ebd54752e03..9e83bcb6bc99 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -77,7 +77,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, xdst->u.rt.rt_iif = fl4->flowi4_iif; xdst->u.dst.dev = dev; - dev_hold(dev); + dev_hold_track(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC); /* Sheit... I remember I did this right. 
Apparently, * it was magically lost, so this code needs audit */ diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3445f8017430..3eee17790a82 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -405,13 +405,13 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) if (ndev->cnf.forwarding) dev_disable_lro(dev); /* We refer to the device */ - dev_hold(dev); + dev_hold_track(dev, &ndev->dev_tracker, GFP_KERNEL); if (snmp6_alloc_dev(ndev) < 0) { netdev_dbg(dev, "%s: cannot allocate memory for statistics\n", __func__); neigh_parms_release(&nd_tbl, ndev->nd_parms); - dev_put(dev); + dev_put_track(dev, &ndev->dev_tracker); kfree(ndev); return ERR_PTR(err); } diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index 1d4054bb345b..881d1477d24a 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -263,7 +263,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) #ifdef NET_REFCNT_DEBUG pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL"); #endif - dev_put(dev); + dev_put_track(dev, &idev->dev_tracker); if (!idev->dead) { pr_warn("Freeing alive inet6 device %p\n", idev); return; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index dab4a047590b..d1636425654e 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -337,11 +337,8 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr); rcu_read_unlock(); - if (!inet_can_nonlocal_bind(net, inet) && - v4addr != htonl(INADDR_ANY) && - chk_addr_ret != RTN_LOCAL && - chk_addr_ret != RTN_MULTICAST && - chk_addr_ret != RTN_BROADCAST) { + if (!inet_addr_valid_or_nonlocal(net, inet, v4addr, + chk_addr_ret)) { err = -EADDRNOTAVAIL; goto out; } diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 828e62514260..b5995c1f4d7a 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -175,7 +175,6 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des * See 11.3.2 of RFC 3775 for details. 
*/ if (opt[off] == IPV6_TLV_HAO) { - struct in6_addr final_addr; struct ipv6_destopt_hao *hao; hao = (struct ipv6_destopt_hao *)&opt[off]; @@ -184,9 +183,7 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des hao->length); goto bad; } - final_addr = hao->addr; - hao->addr = iph->saddr; - iph->saddr = final_addr; + swap(hao->addr, iph->saddr); } break; } diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index a349d4798077..ba5e81cd569c 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -16,6 +16,7 @@ #include <crypto/authenc.h> #include <linux/err.h> #include <linux/module.h> +#include <net/gro.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/esp.h> diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 38ece3b7b839..77e34aec7e82 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -686,7 +686,6 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) struct net *net = dev_net(skb->dev); int accept_source_route = net->ipv6.devconf_all->accept_source_route; - idev = __in6_dev_get(skb->dev); if (idev && accept_source_route > idev->cnf.accept_source_route) accept_source_route = idev->cnf.accept_source_route; diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index dcedfe29d9d9..ec029c86ae06 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -340,10 +340,6 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_match(struct fib_rule *rule, return 1; } -static const struct nla_policy fib6_rule_policy[FRA_MAX+1] = { - FRA_GENERIC_POLICY, -}; - static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb, @@ -459,7 +455,6 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = { .fill = fib6_rule_fill, .nlmsg_payload = fib6_rule_nlmsg_payload, .nlgroup = RTNLGRP_IPV6_RULE, - .policy = fib6_rule_policy, .owner = THIS_MODULE, .fro_net = &init_net, }; diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 67c9114835c8..4514444e96c8 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -165,7 +165,7 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, - const u16 hnum) + const u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -173,8 +173,8 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net, if (hashinfo != &tcp_hashinfo) return NULL; /* only TCP is supported */ - no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -198,7 +198,7 @@ struct sock *inet6_lookup_listener(struct net *net, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { result = inet6_lookup_run_bpf(net, hashinfo, skb, doff, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (result) goto done; } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index d831d2439693..110839a88bc2 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -403,7 +403,7 @@ static void ip6erspan_tunnel_uninit(struct net_device *dev) ip6erspan_tunnel_unlink_md(ign, t); ip6gre_tunnel_unlink(ign, t); dst_cache_reset(&t->dst_cache); - dev_put(dev); + dev_put_track(dev, &t->dev_tracker); } static void ip6gre_tunnel_uninit(struct net_device *dev) @@ -416,7 +416,7 @@ static void 
ip6gre_tunnel_uninit(struct net_device *dev) if (ign->fb_tunnel_dev == dev) WRITE_ONCE(ign->fb_tunnel_dev, NULL); dst_cache_reset(&t->dst_cache); - dev_put(dev); + dev_put_track(dev, &t->dev_tracker); } @@ -1496,7 +1496,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) } ip6gre_tnl_init_features(dev); - dev_hold(dev); + dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL); return 0; cleanup_dst_cache_init: @@ -1888,7 +1888,7 @@ static int ip6erspan_tap_init(struct net_device *dev) dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; ip6erspan_tnl_link_config(tunnel, 1); - dev_hold(dev); + dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL); return 0; cleanup_dst_cache_init: diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 1cbd49d5788d..b29e9ba5e113 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -208,7 +208,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, flush += ntohs(iph->payload_len) != skb_gro_len(skb); - rcu_read_lock(); proto = iph->nexthdr; ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) { @@ -221,7 +220,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) - goto out_unlock; + goto out; iph = ipv6_hdr(skb); } @@ -279,9 +278,6 @@ not_same_flow: pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive, ops->callbacks.gro_receive, head, skb); -out_unlock: - rcu_read_unlock(); - out: skb_gro_flush_final(skb, pp, flush); @@ -331,18 +327,14 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff) iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); - rcu_read_lock(); - nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) - goto out_unlock; + goto out; err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete, udp6_gro_complete, skb, nhoff); -out_unlock: - rcu_read_unlock(); - +out: return err; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index ff4e83e2a506..2995f8d89e7e 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -977,7 +977,7 @@ slow_path: fail_toobig: if (skb->sk && dst_allfrag(skb_dst(skb))) - sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); + sk_gso_disable(skb->sk); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); err = -EMSGSIZE; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 484aca492cc0..fe786df4f849 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -383,7 +383,7 @@ ip6_tnl_dev_uninit(struct net_device *dev) else ip6_tnl_unlink(ip6n, t); dst_cache_reset(&t->dst_cache); - dev_put(dev); + dev_put_track(dev, &t->dev_tracker); } /** @@ -1883,7 +1883,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev) dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len; - dev_hold(dev); + dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL); return 0; destroy_dst: diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 527e9ead7449..ed9b6d6ca65e 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -293,7 +293,7 @@ static void vti6_dev_uninit(struct net_device *dev) RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL); else vti6_tnl_unlink(ip6n, t); - dev_put(dev); + dev_put_track(dev, &t->dev_tracker); } static int vti6_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi, @@ -934,7 +934,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev) dev->tstats = 
netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; - dev_hold(dev); + dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL); return 0; } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 36ed9efb8825..7cf73e60e619 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -182,10 +182,6 @@ static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags) return 1; } -static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = { - FRA_GENERIC_POLICY, -}; - static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb, struct netlink_ext_ack *extack) @@ -218,7 +214,6 @@ static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = { .compare = ip6mr_rule_compare, .fill = ip6mr_rule_fill, .nlgroup = RTNLGRP_IPV6_RULE, - .policy = ip6mr_rule_policy, .owner = THIS_MODULE, }; @@ -746,7 +741,7 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify, if ((v->flags & MIFF_REGISTER) && !notify) unregister_netdevice_queue(dev, head); - dev_put(dev); + dev_put_track(dev, &v->dev_tracker); return 0; } @@ -919,6 +914,7 @@ static int mif6_add(struct net *net, struct mr_table *mrt, /* And finish update writing critical data */ write_lock_bh(&mrt_lock); v->dev = dev; + netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC); #ifdef CONFIG_IPV6_PIMSM_V2 if (v->flags & MIFF_REGISTER) mrt->mroute_reg_vif_num = vifi; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 41efca817db4..a733803a710c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -471,10 +471,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, if (sk->sk_protocol == IPPROTO_TCP) { struct inet_connection_sock *icsk = inet_csk(sk); - local_bh_disable(); + sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, &tcp_prot, 1); - local_bh_enable(); + sk->sk_prot = &tcp_prot; icsk->icsk_af_ops = &ipv4_specific; sk->sk_socket->ops = &inet_stream_ops; @@ -485,10 +485,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, if (sk->sk_protocol == IPPROTO_UDPLITE) prot = &udplite_prot; - local_bh_disable(); + sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, prot, 1); - local_bh_enable(); + sk->sk_prot = prot; sk->sk_socket->ops = &inet_dgram_ops; sk->sk_family = PF_INET; @@ -599,7 +599,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, /* RFC 3542, 6.5: default traffic class of 0x0 */ if (val == -1) val = 0; - np->tclass = val; + if (sk->sk_type == SOCK_STREAM) { + val &= ~INET_ECN_MASK; + val |= np->tclass & INET_ECN_MASK; + } + if (np->tclass != val) { + np->tclass = val; + sk_dst_reset(sk); + } retv = 0; break; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 42d60c76d30a..03be0e6b4826 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -182,8 +182,9 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) if (rt_dev == dev) { rt->dst.dev = blackhole_netdev; - dev_hold(rt->dst.dev); - dev_put(rt_dev); + dev_replace_track(rt_dev, blackhole_netdev, + &rt->dst.dev_tracker, + GFP_ATOMIC); } } spin_unlock_bh(&ul->lock); @@ -328,9 +329,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = { static void rt6_info_init(struct rt6_info *rt) { - struct dst_entry *dst = &rt->dst; - - memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); + memset_after(rt, 0, dst); INIT_LIST_HEAD(&rt->rt6i_uncached); } @@ -594,6 +593,7 @@ struct __rt6_probe_work { 
struct work_struct work; struct in6_addr target; struct net_device *dev; + netdevice_tracker dev_tracker; }; static void rt6_probe_deferred(struct work_struct *w) @@ -604,7 +604,7 @@ static void rt6_probe_deferred(struct work_struct *w) addrconf_addr_solict_mult(&work->target, &mcaddr); ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0); - dev_put(work->dev); + dev_put_track(work->dev, &work->dev_tracker); kfree(work); } @@ -658,7 +658,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh) } else { INIT_WORK(&work->work, rt6_probe_deferred); work->target = *nh_gw; - dev_hold(dev); + dev_hold_track(dev, &work->dev_tracker, GFP_ATOMIC); work->dev = dev; schedule_work(&work->work); } @@ -1485,7 +1485,7 @@ static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket) static u32 rt6_exception_hash(const struct in6_addr *dst, const struct in6_addr *src) { - static siphash_key_t rt6_exception_key __read_mostly; + static siphash_aligned_key_t rt6_exception_key; struct { struct in6_addr dst; struct in6_addr src; @@ -3628,6 +3628,8 @@ pcpu_alloc: } fib6_nh->fib_nh_dev = dev; + netdev_tracker_alloc(dev, &fib6_nh->fib_nh_dev_tracker, gfp_flags); + fib6_nh->fib_nh_oif = dev->ifindex; err = 0; out: @@ -3658,24 +3660,8 @@ void fib6_nh_release(struct fib6_nh *fib6_nh) rcu_read_unlock(); - if (fib6_nh->rt6i_pcpu) { - int cpu; - - for_each_possible_cpu(cpu) { - struct rt6_info **ppcpu_rt; - struct rt6_info *pcpu_rt; - - ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu); - pcpu_rt = *ppcpu_rt; - if (pcpu_rt) { - dst_dev_put(&pcpu_rt->dst); - dst_release(&pcpu_rt->dst); - *ppcpu_rt = NULL; - } - } - - free_percpu(fib6_nh->rt6i_pcpu); - } + fib6_nh_release_dsts(fib6_nh); + free_percpu(fib6_nh->rt6i_pcpu); fib_nh_common_release(&fib6_nh->nh_common); } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 8a3618a30632..a618dce7e0bc 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -521,7 +521,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev) ipip6_tunnel_del_prl(tunnel, NULL); } dst_cache_reset(&tunnel->dst_cache); - dev_put(dev); + dev_put_track(dev, &tunnel->dev_tracker); } static int ipip6_err(struct sk_buff *skb, u32 info) @@ -1463,7 +1463,7 @@ static int ipip6_tunnel_init(struct net_device *dev) dev->tstats = NULL; return err; } - dev_hold(dev); + dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL); return 0; } diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index e8cfb9e997bf..d1b61d00368e 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -20,7 +20,7 @@ #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) -static siphash_key_t syncookie6_secret[2] __read_mostly; +static siphash_aligned_key_t syncookie6_secret[2]; /* RFC 2460, Section 8.3: * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..] 
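[Editor's note] A pattern that recurs across many of the hunks in this section is the conversion of bare dev_hold()/dev_put() pairs into dev_hold_track()/dev_put_track(), which bind each long-lived net_device reference to a ref_tracker slot (a netdevice_tracker field added next to the pointer) so a leaked reference can be attributed to the call site that took it. The sketch below shows the shape of that conversion in isolation; it is illustrative only — struct foo_priv and the foo_*() helpers are hypothetical names that do not appear in these patches, and only the dev_hold_track()/dev_put_track() calls and the netdevice_tracker field mirror the hunks above.

/*
 * Illustrative sketch, not code from this patch set: foo_priv and the
 * foo_*() helpers are made-up names.  The tracker slot must be the same
 * one on both the hold and the put side.
 */
#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *dev;
	netdevice_tracker dev_tracker;	/* per-reference tracking slot */
};

static void foo_bind_dev(struct foo_priv *p, struct net_device *dev)
{
	p->dev = dev;
	/* take the device reference and record it in the tracker slot */
	dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
}

static void foo_unbind_dev(struct foo_priv *p)
{
	/* drop the reference through the same slot it was taken with */
	dev_put_track(p->dev, &p->dev_tracker);
	p->dev = NULL;
}

With the reference-tracking debug option compiled out, these wrappers should reduce to the plain dev_hold()/dev_put() operations, so the conversions in the hunks above are expected to be free in production builds; the tracker slot only matters when the debugging infrastructure is enabled.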
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 680e6481b967..1ac243d18c2b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -72,7 +72,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb); static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *req); -static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); +INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); static const struct inet_connection_sock_af_ops ipv6_mapped; const struct inet_connection_sock_af_ops ipv6_specific; @@ -1466,7 +1466,8 @@ INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, * This is because we cannot sleep with the original spinlock * held. */ -static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) +INDIRECT_CALLABLE_SCOPE +int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct sk_buff *opt_skb = NULL; @@ -1760,6 +1761,7 @@ process: sk_incoming_cpu_update(sk); + sk_defer_free_flush(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; @@ -1896,9 +1898,7 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) { - struct ipv6_pinfo *np = inet6_sk(sk); - - __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); + __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr); } const struct inet_connection_sock_af_ops ipv6_specific = { diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index 1796856bc24f..39db5a226855 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -7,6 +7,7 @@ */ #include <linux/indirect_call_wrapper.h> #include <linux/skbuff.h> +#include <net/gro.h> #include <net/protocol.h> #include <net/tcp.h> #include <net/ip6_checksum.h> diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index a2caca6ccf11..01e53eb4875a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -195,7 +195,7 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, - u16 hnum) + u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -203,8 +203,8 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net, if (udptable != &udp_table) return NULL; /* only UDP is supported */ - no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -240,7 +240,7 @@ struct sock *__udp6_lib_lookup(struct net *net, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { sk = udp6_lookup_run_bpf(net, udptable, skb, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (sk) { result = sk; goto done; diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index b3d9ed96e5ea..7720d04ed396 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -13,6 +13,7 @@ #include <net/udp.h> #include <net/ip6_checksum.h> #include "ip6_offload.h" +#include <net/gro.h> static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, netdev_features_t features) @@ -144,13 +145,11 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) skip: NAPI_GRO_CB(skb)->is_ipv6 = 1; - rcu_read_lock(); if (static_branch_unlikely(&udpv6_encap_needed_key)) sk = 
udp6_gro_lookup_skb(skb, uh->source, uh->dest); pp = udp_gro_receive(head, skb, uh, sk); - rcu_read_unlock(); return pp; flush: diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index af7a4b8b1e9c..fad687ee6dd8 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -74,11 +74,11 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, struct rt6_info *rt = (struct rt6_info *)xdst->route; xdst->u.dst.dev = dev; - dev_hold(dev); + dev_hold_track(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC); xdst->u.rt6.rt6i_idev = in6_dev_get(dev); if (!xdst->u.rt6.rt6i_idev) { - dev_put(dev); + dev_put_track(dev, &xdst->u.dst.dev_tracker); return -ENODEV; } diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 18316ee3c692..49ecbe8d176a 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -142,7 +142,7 @@ static inline size_t iucv_msg_length(struct iucv_message *msg) * iucv_sock_in_state() - check for specific states * @sk: sock structure * @state: first iucv sk state - * @state: second iucv sk state + * @state2: second iucv sk state * * Returns true if the socket in either in the first or second state. */ @@ -172,7 +172,7 @@ static inline int iucv_below_msglim(struct sock *sk) (atomic_read(&iucv->pendings) <= 0)); } -/** +/* * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit */ static void iucv_sock_wake_msglim(struct sock *sk) @@ -187,7 +187,7 @@ static void iucv_sock_wake_msglim(struct sock *sk) rcu_read_unlock(); } -/** +/* * afiucv_hs_send() - send a message through HiperSockets transport */ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, @@ -473,7 +473,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, atomic_set(&iucv->msg_recv, 0); iucv->path = NULL; iucv->sk_txnotify = afiucv_hs_callback_txnotify; - memset(&iucv->src_user_id , 0, 32); + memset(&iucv->init, 0, sizeof(iucv->init)); if (pr_iucv) iucv->transport = AF_IUCV_TRANS_IUCV; else @@ -1831,9 +1831,9 @@ static void afiucv_swap_src_dest(struct sk_buff *skb) memset(skb->data, 0, ETH_HLEN); } -/** +/* * afiucv_hs_callback_syn - react on received SYN - **/ + */ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) { struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb); @@ -1896,9 +1896,9 @@ out: return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_callback_synack() - react on received SYN-ACK - **/ + */ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); @@ -1917,9 +1917,9 @@ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_callback_synfin() - react on received SYN_FIN - **/ + */ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); @@ -1937,9 +1937,9 @@ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_callback_fin() - react on received FIN - **/ + */ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); @@ -1960,9 +1960,9 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_callback_win() - react on received WIN - **/ + */ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); @@ -1978,9 +1978,9 @@ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff 
*skb) return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_callback_rx() - react on received data - **/ + */ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); @@ -2022,11 +2022,11 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_rcv() - base function for arriving data through HiperSockets * transport * called from netif RX softirq - **/ + */ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { @@ -2128,10 +2128,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, return err; } -/** +/* * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets * transport - **/ + */ static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n) { struct iucv_sock *iucv = iucv_sk(sk); diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index f3343a8541a5..8f4d49a7d3e8 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -276,8 +276,8 @@ static union iucv_param *iucv_param[NR_CPUS]; static union iucv_param *iucv_param_irq[NR_CPUS]; /** - * iucv_call_b2f0 - * @code: identifier of IUCV call to CP. + * __iucv_call_b2f0 + * @command: identifier of IUCV call to CP. * @parm: pointer to a struct iucv_parm block * * Calls CP to execute IUCV commands. @@ -309,7 +309,7 @@ static inline int iucv_call_b2f0(int command, union iucv_param *parm) return ccode == 1 ? parm->ctrl.iprcode : ccode; } -/** +/* * iucv_query_maxconn * * Determines the maximum number of connections that may be established. @@ -493,8 +493,8 @@ static void iucv_retrieve_cpu(void *data) cpumask_clear_cpu(cpu, &iucv_buffer_cpumask); } -/** - * iucv_setmask_smp +/* + * iucv_setmask_mp * * Allow iucv interrupts on all cpus. */ @@ -512,7 +512,7 @@ static void iucv_setmask_mp(void) cpus_read_unlock(); } -/** +/* * iucv_setmask_up * * Allow iucv interrupts on a single cpu. @@ -529,7 +529,7 @@ static void iucv_setmask_up(void) smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); } -/** +/* * iucv_enable * * This function makes iucv ready for use. It allocates the pathid @@ -564,7 +564,7 @@ out: return rc; } -/** +/* * iucv_disable * * This function shuts down iucv. It disables iucv interrupts, retrieves @@ -1347,8 +1347,9 @@ EXPORT_SYMBOL(iucv_message_send); * @srccls: source class of message * @buffer: address of send buffer or address of struct iucv_array * @size: length of send buffer - * @ansbuf: address of answer buffer or address of struct iucv_array + * @answer: address of answer buffer or address of struct iucv_array * @asize: size of reply buffer + * @residual: ignored * * This function transmits data to another application. Data to be * transmitted is in a buffer. The receiver of the send is expected to @@ -1400,13 +1401,6 @@ out: } EXPORT_SYMBOL(iucv_message_send2way); -/** - * iucv_path_pending - * @data: Pointer to external interrupt buffer - * - * Process connection pending work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_path_pending { u16 ippathid; u8 ipflags1; @@ -1420,6 +1414,13 @@ struct iucv_path_pending { u8 res4[3]; } __packed; +/** + * iucv_path_pending + * @data: Pointer to external interrupt buffer + * + * Process connection pending work item. Called from tasklet while holding + * iucv_table_lock. 
+ */ static void iucv_path_pending(struct iucv_irq_data *data) { struct iucv_path_pending *ipp = (void *) data; @@ -1461,13 +1462,6 @@ out_sever: iucv_sever_pathid(ipp->ippathid, error); } -/** - * iucv_path_complete - * @data: Pointer to external interrupt buffer - * - * Process connection complete work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_path_complete { u16 ippathid; u8 ipflags1; @@ -1481,6 +1475,13 @@ struct iucv_path_complete { u8 res4[3]; } __packed; +/** + * iucv_path_complete + * @data: Pointer to external interrupt buffer + * + * Process connection complete work item. Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_path_complete(struct iucv_irq_data *data) { struct iucv_path_complete *ipc = (void *) data; @@ -1492,13 +1493,6 @@ static void iucv_path_complete(struct iucv_irq_data *data) path->handler->path_complete(path, ipc->ipuser); } -/** - * iucv_path_severed - * @data: Pointer to external interrupt buffer - * - * Process connection severed work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_path_severed { u16 ippathid; u8 res1; @@ -1511,6 +1505,13 @@ struct iucv_path_severed { u8 res5[3]; } __packed; +/** + * iucv_path_severed + * @data: Pointer to external interrupt buffer + * + * Process connection severed work item. Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_path_severed(struct iucv_irq_data *data) { struct iucv_path_severed *ips = (void *) data; @@ -1528,13 +1529,6 @@ static void iucv_path_severed(struct iucv_irq_data *data) } } -/** - * iucv_path_quiesced - * @data: Pointer to external interrupt buffer - * - * Process connection quiesced work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_path_quiesced { u16 ippathid; u8 res1; @@ -1547,6 +1541,13 @@ struct iucv_path_quiesced { u8 res5[3]; } __packed; +/** + * iucv_path_quiesced + * @data: Pointer to external interrupt buffer + * + * Process connection quiesced work item. Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_path_quiesced(struct iucv_irq_data *data) { struct iucv_path_quiesced *ipq = (void *) data; @@ -1556,13 +1557,6 @@ static void iucv_path_quiesced(struct iucv_irq_data *data) path->handler->path_quiesced(path, ipq->ipuser); } -/** - * iucv_path_resumed - * @data: Pointer to external interrupt buffer - * - * Process connection resumed work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_path_resumed { u16 ippathid; u8 res1; @@ -1575,6 +1569,13 @@ struct iucv_path_resumed { u8 res5[3]; } __packed; +/** + * iucv_path_resumed + * @data: Pointer to external interrupt buffer + * + * Process connection resumed work item. Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_path_resumed(struct iucv_irq_data *data) { struct iucv_path_resumed *ipr = (void *) data; @@ -1584,13 +1585,6 @@ static void iucv_path_resumed(struct iucv_irq_data *data) path->handler->path_resumed(path, ipr->ipuser); } -/** - * iucv_message_complete - * @data: Pointer to external interrupt buffer - * - * Process message complete work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_message_complete { u16 ippathid; u8 ipflags1; @@ -1606,6 +1600,13 @@ struct iucv_message_complete { u8 res2[3]; } __packed; +/** + * iucv_message_complete + * @data: Pointer to external interrupt buffer + * + * Process message complete work item. 
Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_message_complete(struct iucv_irq_data *data) { struct iucv_message_complete *imc = (void *) data; @@ -1624,13 +1625,6 @@ static void iucv_message_complete(struct iucv_irq_data *data) } } -/** - * iucv_message_pending - * @data: Pointer to external interrupt buffer - * - * Process message pending work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_message_pending { u16 ippathid; u8 ipflags1; @@ -1653,6 +1647,13 @@ struct iucv_message_pending { u8 res2[3]; } __packed; +/** + * iucv_message_pending + * @data: Pointer to external interrupt buffer + * + * Process message pending work item. Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_message_pending(struct iucv_irq_data *data) { struct iucv_message_pending *imp = (void *) data; @@ -1673,7 +1674,7 @@ static void iucv_message_pending(struct iucv_irq_data *data) } } -/** +/* * iucv_tasklet_fn: * * This tasklet loops over the queue of irq buffers created by @@ -1717,7 +1718,7 @@ static void iucv_tasklet_fn(unsigned long ignored) spin_unlock(&iucv_table_lock); } -/** +/* * iucv_work_fn: * * This work function loops over the queue of path pending irq blocks @@ -1748,9 +1749,8 @@ static void iucv_work_fn(struct work_struct *work) spin_unlock_bh(&iucv_table_lock); } -/** +/* * iucv_external_interrupt - * @code: irq code * * Handles external interrupts coming in from CP. * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 93271a2632b8..7499c51b1850 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -250,15 +250,15 @@ struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel, session_list = l2tp_session_id_hash(tunnel, session_id); - read_lock_bh(&tunnel->hlist_lock); - hlist_for_each_entry(session, session_list, hlist) + rcu_read_lock_bh(); + hlist_for_each_entry_rcu(session, session_list, hlist) if (session->session_id == session_id) { l2tp_session_inc_refcount(session); - read_unlock_bh(&tunnel->hlist_lock); + rcu_read_unlock_bh(); return session; } - read_unlock_bh(&tunnel->hlist_lock); + rcu_read_unlock_bh(); return NULL; } @@ -291,18 +291,18 @@ struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth) struct l2tp_session *session; int count = 0; - read_lock_bh(&tunnel->hlist_lock); + rcu_read_lock_bh(); for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { - hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { + hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) { if (++count > nth) { l2tp_session_inc_refcount(session); - read_unlock_bh(&tunnel->hlist_lock); + rcu_read_unlock_bh(); return session; } } } - read_unlock_bh(&tunnel->hlist_lock); + rcu_read_unlock_bh(); return NULL; } @@ -347,7 +347,7 @@ int l2tp_session_register(struct l2tp_session *session, head = l2tp_session_id_hash(tunnel, session->session_id); - write_lock_bh(&tunnel->hlist_lock); + spin_lock_bh(&tunnel->hlist_lock); if (!tunnel->acpt_newsess) { err = -ENODEV; goto err_tlock; @@ -384,8 +384,8 @@ int l2tp_session_register(struct l2tp_session *session, l2tp_tunnel_inc_refcount(tunnel); } - hlist_add_head(&session->hlist, head); - write_unlock_bh(&tunnel->hlist_lock); + hlist_add_head_rcu(&session->hlist, head); + spin_unlock_bh(&tunnel->hlist_lock); trace_register_session(session); @@ -394,7 +394,7 @@ int l2tp_session_register(struct l2tp_session *session, err_tlock_pnlock: 
spin_unlock_bh(&pn->l2tp_session_hlist_lock); err_tlock: - write_unlock_bh(&tunnel->hlist_lock); + spin_unlock_bh(&tunnel->hlist_lock); return err; } @@ -1170,9 +1170,9 @@ static void l2tp_session_unhash(struct l2tp_session *session) /* Remove the session from core hashes */ if (tunnel) { /* Remove from the per-tunnel hash */ - write_lock_bh(&tunnel->hlist_lock); - hlist_del_init(&session->hlist); - write_unlock_bh(&tunnel->hlist_lock); + spin_lock_bh(&tunnel->hlist_lock); + hlist_del_init_rcu(&session->hlist); + spin_unlock_bh(&tunnel->hlist_lock); /* For L2TPv3 we have a per-net hash: remove from there, too */ if (tunnel->version != L2TP_HDR_VER_2) { @@ -1181,8 +1181,9 @@ static void l2tp_session_unhash(struct l2tp_session *session) spin_lock_bh(&pn->l2tp_session_hlist_lock); hlist_del_init_rcu(&session->global_hlist); spin_unlock_bh(&pn->l2tp_session_hlist_lock); - synchronize_rcu(); } + + synchronize_rcu(); } } @@ -1190,22 +1191,19 @@ static void l2tp_session_unhash(struct l2tp_session *session) */ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) { - int hash; - struct hlist_node *walk; - struct hlist_node *tmp; struct l2tp_session *session; + int hash; - write_lock_bh(&tunnel->hlist_lock); + spin_lock_bh(&tunnel->hlist_lock); tunnel->acpt_newsess = false; for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { again: - hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { - session = hlist_entry(walk, struct l2tp_session, hlist); - hlist_del_init(&session->hlist); + hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) { + hlist_del_init_rcu(&session->hlist); - write_unlock_bh(&tunnel->hlist_lock); + spin_unlock_bh(&tunnel->hlist_lock); l2tp_session_delete(session); - write_lock_bh(&tunnel->hlist_lock); + spin_lock_bh(&tunnel->hlist_lock); /* Now restart from the beginning of this hash * chain. We always remove a session from the @@ -1215,7 +1213,7 @@ again: goto again; } } - write_unlock_bh(&tunnel->hlist_lock); + spin_unlock_bh(&tunnel->hlist_lock); } /* Tunnel socket destroy hook for UDP encapsulation */ @@ -1408,7 +1406,7 @@ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, tunnel->magic = L2TP_TUNNEL_MAGIC; sprintf(&tunnel->name[0], "tunl %u", tunnel_id); - rwlock_init(&tunnel->hlist_lock); + spin_lock_init(&tunnel->hlist_lock); tunnel->acpt_newsess = true; tunnel->encap = encap; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 98ea98eb9567..a88e070b431d 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -160,7 +160,7 @@ struct l2tp_tunnel { unsigned long dead; struct rcu_head rcu; - rwlock_t hlist_lock; /* protect session_hlist */ + spinlock_t hlist_lock; /* write-protection for session_hlist */ bool acpt_newsess; /* indicates whether this tunnel accepts * new sessions. Protected by hlist_lock. 
*/ diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index bca75bef8282..9d1aafe75f92 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -32,7 +32,8 @@ static struct dentry *rootdir; struct l2tp_dfs_seq_data { - struct net *net; + struct net *net; + netns_tracker ns_tracker; int tunnel_idx; /* current tunnel */ int session_idx; /* index of session within current tunnel */ struct l2tp_tunnel *tunnel; @@ -120,24 +121,21 @@ static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) { struct l2tp_tunnel *tunnel = v; + struct l2tp_session *session; int session_count = 0; int hash; - struct hlist_node *walk; - struct hlist_node *tmp; - read_lock_bh(&tunnel->hlist_lock); + rcu_read_lock_bh(); for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { - hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { - struct l2tp_session *session; - - session = hlist_entry(walk, struct l2tp_session, hlist); + hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) { + /* Session ID of zero is a dummy/reserved value used by pppol2tp */ if (session->session_id == 0) continue; session_count++; } } - read_unlock_bh(&tunnel->hlist_lock); + rcu_read_unlock_bh(); seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id); if (tunnel->sock) { @@ -284,7 +282,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file) rc = PTR_ERR(pd->net); goto err_free_pd; } - + netns_tracker_alloc(pd->net, &pd->ns_tracker, GFP_KERNEL); rc = seq_open(file, &l2tp_dfs_seq_ops); if (rc) goto err_free_net; @@ -296,7 +294,7 @@ out: return rc; err_free_net: - put_net(pd->net); + put_net_track(pd->net, &pd->ns_tracker); err_free_pd: kfree(pd); goto out; @@ -310,7 +308,7 @@ static int l2tp_dfs_seq_release(struct inode *inode, struct file *file) seq = file->private_data; pd = seq->private; if (pd->net) - put_net(pd->net); + put_net_track(pd->net, &pd->ns_tracker); kfree(pd); seq_release(inode, file); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 3086f4a6ae68..26c00ebf4fba 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -224,7 +224,7 @@ static int llc_ui_release(struct socket *sock) } else { release_sock(sk); } - dev_put(llc->dev); + dev_put_track(llc->dev, &llc->dev_tracker); sock_put(sk); llc_sk_free(sk); out: @@ -295,6 +295,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd); if (!llc->dev) goto out; + netdev_tracker_alloc(llc->dev, &llc->dev_tracker, GFP_KERNEL); rc = -EUSERS; llc->laddr.lsap = llc_ui_autoport(); if (!llc->laddr.lsap) @@ -362,7 +363,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) } else llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd, addr->sllc_mac); - dev_hold(llc->dev); + dev_hold_track(llc->dev, &llc->dev_tracker, GFP_ATOMIC); rcu_read_unlock(); if (!llc->dev) goto out; diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index 0ff490a73fae..07e9abb5978a 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c @@ -195,7 +195,7 @@ static int llc_seq_core_show(struct seq_file *seq, void *v) timer_pending(&llc->pf_cycle_timer.timer), timer_pending(&llc->rej_sent_timer.timer), timer_pending(&llc->busy_state_timer.timer), - !!sk->sk_backlog.tail, !!sk->sk_lock.owned); + !!sk->sk_backlog.tail, sock_owned_by_user_nocheck(sk)); out: return 0; } diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 
2d0dd69f9753..87a208089caf 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -5,7 +5,7 @@ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include <linux/ieee80211.h> @@ -3201,6 +3201,18 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif) } EXPORT_SYMBOL(ieee80211_csa_finish); +void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_tx) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + + sdata->csa_block_tx = block_tx; + sdata_info(sdata, "channel switch failed, disconnecting\n"); + ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); +} +EXPORT_SYMBOL(ieee80211_channel_switch_disconnect); + static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata, u32 *changed) { @@ -4271,6 +4283,21 @@ ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata, changed |= BSS_CHANGED_HE_BSS_COLOR; ieee80211_bss_info_change_notify(sdata, changed); + + if (!sdata->vif.bss_conf.nontransmitted && sdata->vif.mbssid_tx_vif) { + struct ieee80211_sub_if_data *child; + + mutex_lock(&sdata->local->iflist_mtx); + list_for_each_entry(child, &sdata->local->interfaces, list) { + if (child != sdata && child->vif.mbssid_tx_vif == &sdata->vif) { + child->vif.bss_conf.he_bss_color.color = color; + child->vif.bss_conf.he_bss_color.enabled = enable; + ieee80211_bss_info_change_notify(child, + BSS_CHANGED_HE_BSS_COLOR); + } + } + mutex_unlock(&sdata->local->iflist_mtx); + } } static int ieee80211_color_change_finalize(struct ieee80211_sub_if_data *sdata) @@ -4355,6 +4382,9 @@ ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev, sdata_assert_lock(sdata); + if (sdata->vif.bss_conf.nontransmitted) + return -EINVAL; + mutex_lock(&local->mtx); /* don't allow another color change if one is already active or if csa @@ -4386,6 +4416,18 @@ out: return err; } +static int +ieee80211_set_radar_background(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + if (!local->ops->set_radar_background) + return -EOPNOTSUPP; + + return local->ops->set_radar_background(&local->hw, chandef); +} + const struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -4490,4 +4532,5 @@ const struct cfg80211_ops mac80211_config_ops = { .reset_tid_config = ieee80211_reset_tid_config, .set_sar_specs = ieee80211_set_sar_specs, .color_change = ieee80211_color_change, + .set_radar_background = ieee80211_set_radar_background, }; diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 481f01b0f65c..9479f2787ea7 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -936,14 +936,15 @@ static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf, PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, "RX-FULL-BW-SU-USING-MU-WITH-NON-COMP-SIGB"); - switch (cap[9] & IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) { - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US: + switch (u8_get_bits(cap[9], + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) { + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: PRINT("NOMINAL-PACKET-PADDING-0US"); break; - case 
IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US: + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: PRINT("NOMINAL-PACKET-PADDING-8US"); break; - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US: + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: PRINT("NOMINAL-PACKET-PADDING-16US"); break; } diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index c336267f4599..4e2fc1a08681 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -1486,4 +1486,26 @@ static inline void drv_twt_teardown_request(struct ieee80211_local *local, trace_drv_return_void(local); } +static inline int drv_net_fill_forward_path(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct net_device_path_ctx *ctx, + struct net_device_path *path) +{ + int ret = -EOPNOTSUPP; + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_net_fill_forward_path(local, sdata, sta); + if (local->ops->net_fill_forward_path) + ret = local->ops->net_fill_forward_path(&local->hw, + &sdata->vif, sta, + ctx, path); + trace_drv_return_int(local, ret); + + return ret; +} + #endif /* __MAC80211_DRIVER_OPS */ diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c index 99a2e30b3833..b2253df54413 100644 --- a/net/mac80211/ethtool.c +++ b/net/mac80211/ethtool.c @@ -14,7 +14,9 @@ #include "driver-ops.h" static int ieee80211_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *rp) + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) { struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); @@ -25,7 +27,9 @@ static int ieee80211_set_ringparam(struct net_device *dev, } static void ieee80211_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *rp) + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) { struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 5666bbb8860b..08c0542c93a3 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1463,7 +1463,7 @@ struct ieee80211_local { }; static inline struct ieee80211_sub_if_data * -IEEE80211_DEV_TO_SUB_IF(struct net_device *dev) +IEEE80211_DEV_TO_SUB_IF(const struct net_device *dev) { return netdev_priv(dev); } diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 20aa5cc31f77..41531478437c 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -789,6 +789,64 @@ static const struct net_device_ops ieee80211_monitorif_ops = { .ndo_get_stats64 = ieee80211_get_stats64, }; +static int ieee80211_netdev_fill_forward_path(struct net_device_path_ctx *ctx, + struct net_device_path *path) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local; + struct sta_info *sta; + int ret = -ENOENT; + + sdata = IEEE80211_DEV_TO_SUB_IF(ctx->dev); + local = sdata->local; + + if (!local->ops->net_fill_forward_path) + return -EOPNOTSUPP; + + rcu_read_lock(); + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + sta = rcu_dereference(sdata->u.vlan.sta); + if (sta) + break; + if (sdata->wdev.use_4addr) + goto out; + if (is_multicast_ether_addr(ctx->daddr)) + goto out; + sta = sta_info_get_bss(sdata, ctx->daddr); + break; + case NL80211_IFTYPE_AP: + if (is_multicast_ether_addr(ctx->daddr)) + goto out; + sta = sta_info_get(sdata, ctx->daddr); + break; + case NL80211_IFTYPE_STATION: + if 
(sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { + sta = sta_info_get(sdata, ctx->daddr); + if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) + goto out; + + break; + } + } + + sta = sta_info_get(sdata, sdata->u.mgd.bssid); + break; + default: + goto out; + } + + if (!sta) + goto out; + + ret = drv_net_fill_forward_path(local, sdata, &sta->sta, ctx, path); +out: + rcu_read_unlock(); + + return ret; +} + static const struct net_device_ops ieee80211_dataif_8023_ops = { .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, @@ -798,6 +856,7 @@ static const struct net_device_ops ieee80211_dataif_8023_ops = { .ndo_set_mac_address = ieee80211_change_mac, .ndo_select_queue = ieee80211_netdev_select_queue, .ndo_get_stats64 = ieee80211_get_stats64, + .ndo_fill_forward_path = ieee80211_netdev_fill_forward_path, }; static bool ieee80211_iftype_supports_hdr_offload(enum nl80211_iftype iftype) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 45fb517591ee..5311c3cd3050 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -1131,17 +1131,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->scan_ies_len += 2 + sizeof(struct ieee80211_vht_cap); - /* HE cap element is variable in size - set len to allow max size */ /* - * TODO: 1 is added at the end of the calculation to accommodate for - * the temporary placing of the HE capabilities IE under EXT. - * Remove it once it is placed in the final place. - */ - if (supp_he) + * HE cap element is variable in size - set len to allow max size */ + if (supp_he) { local->scan_ies_len += - 2 + sizeof(struct ieee80211_he_cap_elem) + + 3 + sizeof(struct ieee80211_he_cap_elem) + sizeof(struct ieee80211_he_mcs_nss_supp) + - IEEE80211_HE_PPE_THRES_MAX_LEN + 1; + IEEE80211_HE_PPE_THRES_MAX_LEN; + } if (!local->ops->hw_scan) { /* For hw_scan, driver needs to set these up. 
*/ diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 37f7d975f3da..51f55c4ee3c6 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -164,12 +164,15 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, chandef->freq1_offset = channel->freq_offset; if (channel->band == NL80211_BAND_6GHZ) { - if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, chandef)) + if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, chandef)) { + mlme_dbg(sdata, + "bad 6 GHz operation, disabling HT/VHT/HE\n"); ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT | IEEE80211_STA_DISABLE_HE; - else + } else { ret = 0; + } vht_chandef = *chandef; goto out; } else if (sband->band == NL80211_BAND_S1GHZ) { @@ -190,6 +193,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap); if (!ht_oper || !sta_ht_cap.ht_supported) { + mlme_dbg(sdata, "HT operation missing / HT not supported\n"); ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT | IEEE80211_STA_DISABLE_HE; @@ -223,6 +227,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { ieee80211_chandef_ht_oper(ht_oper, chandef); } else { + mlme_dbg(sdata, "40 MHz not supported\n"); /* 40 MHz (and 80 MHz) must be supported for VHT */ ret = IEEE80211_STA_DISABLE_VHT; /* also mark 40 MHz disabled */ @@ -231,6 +236,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, } if (!vht_oper || !sband->vht_cap.vht_supported) { + mlme_dbg(sdata, "VHT operation missing / VHT not supported\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } @@ -253,7 +259,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) sdata_info(sdata, - "HE AP VHT information is invalid, disable HE\n"); + "HE AP VHT information is invalid, disabling HE\n"); ret = IEEE80211_STA_DISABLE_HE; goto out; } @@ -263,7 +269,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, - "AP VHT information is invalid, disable VHT\n"); + "AP VHT information is invalid, disabling VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } @@ -271,7 +277,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, if (!cfg80211_chandef_valid(&vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, - "AP VHT information is invalid, disable VHT\n"); + "AP VHT information is invalid, disabling VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } @@ -284,7 +290,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, - "AP VHT information doesn't match HT, disable VHT\n"); + "AP VHT information doesn't match HT, disabling VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } @@ -649,10 +655,6 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata, if (!he_cap || !reg_cap) return; - /* - * TODO: the 1 added is because this temporarily is under the EXTENSION - * IE. Get rid of it when it moves. 
- */ he_cap_size = 2 + 1 + sizeof(he_cap->he_cap_elem) + ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) + @@ -3741,6 +3743,10 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, elems->timeout_int && elems->timeout_int->type == WLAN_TIMEOUT_ASSOC_COMEBACK) { u32 tu, ms; + + cfg80211_assoc_comeback(sdata->dev, assoc_data->bss, + le32_to_cpu(elems->timeout_int->value)); + tu = le32_to_cpu(elems->timeout_int->value); ms = tu * 1024 / 1000; sdata_info(sdata, @@ -5043,19 +5049,23 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, /* disable HT/VHT/HE if we don't support them */ if (!sband->ht_cap.ht_supported && !is_6ghz) { + mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } if (!sband->vht_cap.vht_supported && is_5ghz) { + mlme_dbg(sdata, "VHT not supported, disabling VHT/HE\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } if (!ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif))) + ieee80211_vif_type_p2p(&sdata->vif))) { + mlme_dbg(sdata, "HE not supported, disabling it\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_HE; + } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && !is_6ghz) { ht_oper = elems->ht_operation; @@ -5079,6 +5089,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, } if (!elems->vht_cap_elem) { + sdata_info(sdata, + "bad VHT capabilities, disabling VHT\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; vht_oper = NULL; } @@ -5126,8 +5138,10 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, break; } - if (!have_80mhz) + if (!have_80mhz) { + sdata_info(sdata, "80 MHz not supported, disabling VHT\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; + } if (sband->band == NL80211_BAND_S1GHZ) { s1g_oper = elems->s1g_oper; @@ -5691,12 +5705,14 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, else if (!is_6ghz) ifmgd->flags |= IEEE80211_STA_DISABLE_HT; vht_elem = ieee80211_bss_get_elem(req->bss, WLAN_EID_VHT_CAPABILITY); - if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) + if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) { memcpy(&assoc_data->ap_vht_cap, vht_elem->data, sizeof(struct ieee80211_vht_cap)); - else if (is_5ghz) + } else if (is_5ghz) { + sdata_info(sdata, "VHT capa missing/short, disabling VHT/HE\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_VHT | IEEE80211_STA_DISABLE_HE; + } rcu_read_unlock(); if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) && @@ -5770,16 +5786,21 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, } if (req->flags & ASSOC_REQ_DISABLE_HT) { + mlme_dbg(sdata, "HT disabled by flag, disabling HT/VHT/HE\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } - if (req->flags & ASSOC_REQ_DISABLE_VHT) + if (req->flags & ASSOC_REQ_DISABLE_VHT) { + mlme_dbg(sdata, "VHT disabled by flag, disabling VHT\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; + } - if (req->flags & ASSOC_REQ_DISABLE_HE) + if (req->flags & ASSOC_REQ_DISABLE_HE) { + mlme_dbg(sdata, "HE disabled by flag, disabling VHT\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_HE; + } err = ieee80211_prep_connection(sdata, req->bss, true, override); if (err) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 
72b44d4c42d0..9c3b7fc377c1 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -18,8 +18,6 @@ #define AVG_AMPDU_SIZE 16 #define AVG_PKT_SIZE 1200 -#define SAMPLE_SWITCH_THR 100 - /* Number of bits for an average sized packet */ #define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0544563ede52..fec82f7c2fa6 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -465,7 +465,12 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, unsigned int stbc; rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS)); - *pos++ = local->hw.radiotap_mcs_details; + *pos = local->hw.radiotap_mcs_details; + if (status->enc_flags & RX_ENC_FLAG_HT_GF) + *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FMT; + if (status->enc_flags & RX_ENC_FLAG_LDPC) + *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FEC; + pos++; *pos = 0; if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) *pos |= IEEE80211_RADIOTAP_MCS_SGI; diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 9e8381bef7ed..d91498f77796 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -2892,6 +2892,13 @@ TRACE_EVENT(drv_twt_teardown_request, ) ); +DEFINE_EVENT(sta_event, drv_net_fill_forward_path, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + #endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 86a54df3aabd..6d054fed062f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3821,7 +3821,7 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) { struct ieee80211_local *local = hw_to_local(hw); struct airtime_sched_info *air_sched; - u64 now = ktime_get_boottime_ns(); + u64 now = ktime_get_coarse_boottime_ns(); struct ieee80211_txq *ret = NULL; struct airtime_info *air_info; struct txq_info *txqi = NULL; @@ -3948,7 +3948,7 @@ void ieee80211_update_airtime_weight(struct ieee80211_local *local, u64 weight_sum = 0; if (unlikely(!now)) - now = ktime_get_boottime_ns(); + now = ktime_get_coarse_boottime_ns(); lockdep_assert_held(&air_sched->lock); @@ -3974,7 +3974,7 @@ void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_local *local = hw_to_local(hw); struct txq_info *txqi = to_txq_info(txq); struct airtime_sched_info *air_sched; - u64 now = ktime_get_boottime_ns(); + u64 now = ktime_get_coarse_boottime_ns(); struct airtime_info *air_info; u8 ac = txq->ac; bool was_active; @@ -4032,7 +4032,7 @@ static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw, if (!purge) airtime_set_active(air_sched, air_info, - ktime_get_boottime_ns()); + ktime_get_coarse_boottime_ns()); rb_erase_cached(&txqi->schedule_order, &air_sched->active_txqs); @@ -4120,7 +4120,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, if (RB_EMPTY_NODE(&txqi->schedule_order)) goto out; - now = ktime_get_boottime_ns(); + now = ktime_get_coarse_boottime_ns(); /* Like in ieee80211_next_txq(), make sure the first station in the * scheduling order is eligible for transmission to avoid starvation. 
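
The tx.c hunks directly above switch the airtime scheduler's timestamps from ktime_get_boottime_ns() to ktime_get_coarse_boottime_ns(), i.e. from a precise clock read to (roughly) the value captured at the last timer tick. The userspace C sketch below is only an analogy for that trade-off, not part of the patch: no coarse boottime clock is exposed to userspace, so CLOCK_MONOTONIC_COARSE stands in, and the loop count is an arbitrary choice. It compares the resolution and the approximate per-read cost of a precise clock against a coarse one.

/* Coarse vs. precise clock reads: a userspace analogy for the
 * ktime_get_boottime_ns() -> ktime_get_coarse_boottime_ns() change.
 * CLOCK_MONOTONIC_COARSE stands in for the in-kernel coarse clock.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t ns_of(const struct timespec *ts)
{
	return (uint64_t)ts->tv_sec * 1000000000ull + ts->tv_nsec;
}

static void probe(clockid_t id, const char *name)
{
	struct timespec res, t0, t1;
	uint64_t loops = 1000000, i;

	clock_getres(id, &res);
	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < loops; i++) {
		struct timespec now;

		clock_gettime(id, &now);	/* the read whose cost we compare */
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	printf("%-22s resolution %9ld ns, ~%llu ns per read\n",
	       name, res.tv_nsec,
	       (unsigned long long)((ns_of(&t1) - ns_of(&t0)) / loops));
}

int main(void)
{
	probe(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
	probe(CLOCK_MONOTONIC_COARSE, "CLOCK_MONOTONIC_COARSE");
	return 0;
}

Coarse clocks typically report only tick-granularity time but are noticeably cheaper to read, which is a reasonable fit for per-TXQ airtime bookkeeping that does not need nanosecond precision.
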
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 4eed23e27610..7ed0d268aff2 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -449,7 +449,6 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb, (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; - hdr = (struct ieee80211_hdr *) pos; pos += hdrlen; pn64 = atomic64_inc_return(&key->conf.tx_pn); @@ -686,7 +685,6 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; - hdr = (struct ieee80211_hdr *)pos; pos += hdrlen; pn64 = atomic64_inc_return(&key->conf.tx_pn); @@ -881,8 +879,6 @@ ieee80211_crypto_cs_decrypt(struct ieee80211_rx_data *rx) if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; - hdr = (struct ieee80211_hdr *)rx->skb->data; - rx_pn = key->u.gen.rx_pn[qos_tid]; skb_pn = rx->skb->data + hdrlen + cs->pn_off; diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index 871cf6266125..c921de63b494 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -405,8 +405,7 @@ static void mctp_sk_unhash(struct sock *sk) trace_mctp_key_release(key, MCTP_TRACE_KEY_CLOSED); spin_lock(&key->lock); - if (key->reasm_head) - kfree_skb(key->reasm_head); + kfree_skb(key->reasm_head); key->reasm_head = NULL; key->reasm_dead = true; key->valid = false; diff --git a/net/mctp/device.c b/net/mctp/device.c index 8799ee77e7b7..ef2755f82f87 100644 --- a/net/mctp/device.c +++ b/net/mctp/device.c @@ -35,14 +35,24 @@ struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev) return rtnl_dereference(dev->mctp_ptr); } -static int mctp_fill_addrinfo(struct sk_buff *skb, struct netlink_callback *cb, - struct mctp_dev *mdev, mctp_eid_t eid) +static int mctp_addrinfo_size(void) +{ + return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + + nla_total_size(1) // IFA_LOCAL + + nla_total_size(1) // IFA_ADDRESS + ; +} + +/* flag should be NLM_F_MULTI for dump calls */ +static int mctp_fill_addrinfo(struct sk_buff *skb, + struct mctp_dev *mdev, mctp_eid_t eid, + int msg_type, u32 portid, u32 seq, int flag) { struct ifaddrmsg *hdr; struct nlmsghdr *nlh; - nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - RTM_NEWADDR, sizeof(*hdr), NLM_F_MULTI); + nlh = nlmsg_put(skb, portid, seq, + msg_type, sizeof(*hdr), flag); if (!nlh) return -EMSGSIZE; @@ -72,10 +82,14 @@ static int mctp_dump_dev_addrinfo(struct mctp_dev *mdev, struct sk_buff *skb, struct netlink_callback *cb) { struct mctp_dump_cb *mcb = (void *)cb->ctx; + u32 portid, seq; int rc = 0; + portid = NETLINK_CB(cb->skb).portid; + seq = cb->nlh->nlmsg_seq; for (; mcb->a_idx < mdev->num_addrs; mcb->a_idx++) { - rc = mctp_fill_addrinfo(skb, cb, mdev, mdev->addrs[mcb->a_idx]); + rc = mctp_fill_addrinfo(skb, mdev, mdev->addrs[mcb->a_idx], + RTM_NEWADDR, portid, seq, NLM_F_MULTI); if (rc < 0) break; } @@ -127,6 +141,32 @@ out: return skb->len; } +static void mctp_addr_notify(struct mctp_dev *mdev, mctp_eid_t eid, int msg_type, + struct sk_buff *req_skb, struct nlmsghdr *req_nlh) +{ + u32 portid = NETLINK_CB(req_skb).portid; + struct net *net = dev_net(mdev->dev); + struct sk_buff *skb; + int rc = -ENOBUFS; + + skb = nlmsg_new(mctp_addrinfo_size(), GFP_KERNEL); + if (!skb) + goto out; + + rc = mctp_fill_addrinfo(skb, mdev, eid, msg_type, + portid, req_nlh->nlmsg_seq, 0); + if (rc < 0) { + WARN_ON_ONCE(rc == -EMSGSIZE); + goto out; + } + + rtnl_notify(skb, net, portid, RTNLGRP_MCTP_IFADDR, req_nlh, GFP_KERNEL); + return; +out: + kfree_skb(skb); 
+ rtnl_set_sk_err(net, RTNLGRP_MCTP_IFADDR, rc); +} + static const struct nla_policy ifa_mctp_policy[IFA_MAX + 1] = { [IFA_ADDRESS] = { .type = NLA_U8 }, [IFA_LOCAL] = { .type = NLA_U8 }, @@ -189,6 +229,7 @@ static int mctp_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, kfree(tmp_addrs); + mctp_addr_notify(mdev, addr->s_addr, RTM_NEWADDR, skb, nlh); mctp_route_add_local(mdev, addr->s_addr); return 0; @@ -244,6 +285,8 @@ static int mctp_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, mdev->num_addrs--; spin_unlock_irqrestore(&mdev->addrs_lock, flags); + mctp_addr_notify(mdev, addr->s_addr, RTM_DELADDR, skb, nlh); + return 0; } diff --git a/net/mctp/route.c b/net/mctp/route.c index cdf09c2a7007..8d9f4ff3e285 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -231,9 +231,7 @@ static void __mctp_key_unlock_drop(struct mctp_sk_key *key, struct net *net, /* and one for the local reference */ mctp_key_unref(key); - if (skb) - kfree_skb(skb); - + kfree_skb(skb); } #ifdef CONFIG_MCTP_FLOWS @@ -892,8 +890,7 @@ out_release: if (!ext_rt) mctp_route_release(rt); - if (dev) - dev_put(dev); + dev_put(dev); return rc; diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c index 36fac3daf86a..86ad15abf897 100644 --- a/net/mctp/test/route-test.c +++ b/net/mctp/test/route-test.c @@ -150,11 +150,6 @@ static void mctp_test_fragment(struct kunit *test) rt = mctp_test_create_route(&init_net, NULL, 10, mtu); KUNIT_ASSERT_TRUE(test, rt); - /* The refcount would usually be incremented as part of a route lookup, - * but we're setting the route directly here. - */ - refcount_inc(&rt->rt.refs); - rc = mctp_do_fragment_route(&rt->rt, skb, mtu, MCTP_TAG_OWNER); KUNIT_EXPECT_FALSE(test, rc); diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 0c7bde1c14a6..48f75a56f4ae 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -230,8 +230,8 @@ static struct mpls_nh *mpls_get_nexthop(struct mpls_route *rt, u8 index) * Since those fields can change at any moment, use READ_ONCE to * access both. 
*/ -static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, - struct sk_buff *skb) +static const struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, + struct sk_buff *skb) { u32 hash = 0; int nh_index = 0; @@ -343,8 +343,8 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, { struct net *net = dev_net(dev); struct mpls_shim_hdr *hdr; + const struct mpls_nh *nh; struct mpls_route *rt; - struct mpls_nh *nh; struct mpls_entry_decoded dec; struct net_device *out_dev; struct mpls_dev *out_mdev; @@ -2360,12 +2360,12 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, u32 labels[MAX_NEW_LABELS]; struct mpls_shim_hdr *hdr; unsigned int hdr_size = 0; + const struct mpls_nh *nh; struct net_device *dev; struct mpls_route *rt; struct rtmsg *rtm, *r; struct nlmsghdr *nlh; struct sk_buff *skb; - struct mpls_nh *nh; u8 n_labels; int err; diff --git a/net/mpls/internal.h b/net/mpls/internal.h index 893df00b77b6..b9f492ddf93b 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -158,17 +158,16 @@ struct mpls_route { /* next hop label forwarding entry */ }; #define for_nexthops(rt) { \ - int nhsel; struct mpls_nh *nh; u8 *__nh; \ - for (nhsel = 0, nh = (rt)->rt_nh, __nh = (u8 *)((rt)->rt_nh); \ + int nhsel; const struct mpls_nh *nh; \ + for (nhsel = 0, nh = (rt)->rt_nh; \ nhsel < (rt)->rt_nhn; \ - __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++) + nh = (void *)nh + (rt)->rt_nh_size, nhsel++) #define change_nexthops(rt) { \ - int nhsel; struct mpls_nh *nh; u8 *__nh; \ - for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh), \ - __nh = (u8 *)((rt)->rt_nh); \ + int nhsel; struct mpls_nh *nh; \ + for (nhsel = 0, nh = (rt)->rt_nh; \ nhsel < (rt)->rt_nhn; \ - __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++) + nh = (void *)nh + (rt)->rt_nh_size, nhsel++) #define endfor_nexthops(rt) } diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index f523051f5aef..6cde58c259a8 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -38,7 +38,8 @@ struct mptcp_pm_add_entry { u8 retrans_times; }; -#define MAX_ADDR_ID 255 +/* max value of mptcp_addr_info.id */ +#define MAX_ADDR_ID U8_MAX #define BITMAP_SZ DIV_ROUND_UP(MAX_ADDR_ID + 1, BITS_PER_LONG) struct pm_nl_pernet { @@ -825,14 +826,13 @@ find_next: entry->addr.id = find_next_zero_bit(pernet->id_bitmap, MAX_ADDR_ID + 1, pernet->next_id); - if ((!entry->addr.id || entry->addr.id > MAX_ADDR_ID) && - pernet->next_id != 1) { + if (!entry->addr.id && pernet->next_id != 1) { pernet->next_id = 1; goto find_next; } } - if (!entry->addr.id || entry->addr.id > MAX_ADDR_ID) + if (!entry->addr.id) goto out; __set_bit(entry->addr.id, pernet->id_bitmap); @@ -1705,22 +1705,28 @@ next: static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info) { + struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }, *entry; struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - struct mptcp_pm_addr_entry addr, *entry; struct net *net = sock_net(skb->sk); - u8 bkup = 0; + u8 bkup = 0, lookup_by_id = 0; int ret; - ret = mptcp_pm_parse_addr(attr, info, true, &addr); + ret = mptcp_pm_parse_addr(attr, info, false, &addr); if (ret < 0) return ret; if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP) bkup = 1; + if (addr.addr.family == AF_UNSPEC) { + lookup_by_id = 1; + if (!addr.addr.id) + return -EOPNOTSUPP; + } list_for_each_entry(entry, &pernet->local_addr_list, list) { - if (addresses_equal(&entry->addr, 
&addr.addr, true)) { + if ((!lookup_by_id && addresses_equal(&entry->addr, &addr.addr, true)) || + (lookup_by_id && entry->addr.id == addr.addr.id)) { mptcp_nl_addr_backup(net, &entry->addr, bkup); if (bkup) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 54613f5b7521..df5a0cf431c1 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -22,6 +22,7 @@ #endif #include <net/mptcp.h> #include <net/xfrm.h> +#include <asm/ioctls.h> #include "protocol.h" #include "mib.h" @@ -46,9 +47,10 @@ struct mptcp_skb_cb { enum { MPTCP_CMSG_TS = BIT(0), + MPTCP_CMSG_INQ = BIT(1), }; -static struct percpu_counter mptcp_sockets_allocated; +static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp; static void __mptcp_destroy_sock(struct sock *sk); static void __mptcp_check_send_data_fin(struct sock *sk); @@ -738,6 +740,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk) MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, delta); MPTCP_SKB_CB(skb)->offset += delta; + MPTCP_SKB_CB(skb)->map_seq += delta; __skb_queue_tail(&sk->sk_receive_queue, skb); } msk->ack_seq = end_seq; @@ -1369,7 +1372,7 @@ out: struct subflow_send_info { struct sock *ssk; - u64 ratio; + u64 linger_time; }; void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow) @@ -1394,20 +1397,24 @@ bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) return __mptcp_subflow_active(subflow); } +#define SSK_MODE_ACTIVE 0 +#define SSK_MODE_BACKUP 1 +#define SSK_MODE_MAX 2 + /* implement the mptcp packet scheduler; * returns the subflow that will transmit the next DSS * additionally updates the rtx timeout */ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) { - struct subflow_send_info send_info[2]; + struct subflow_send_info send_info[SSK_MODE_MAX]; struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; + u32 pace, burst, wmem; int i, nr_active = 0; struct sock *ssk; + u64 linger_time; long tout = 0; - u64 ratio; - u32 pace; sock_owned_by_me(sk); @@ -1426,10 +1433,11 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) } /* pick the subflow with the lower wmem/wspace ratio */ - for (i = 0; i < 2; ++i) { + for (i = 0; i < SSK_MODE_MAX; ++i) { send_info[i].ssk = NULL; - send_info[i].ratio = -1; + send_info[i].linger_time = -1; } + mptcp_for_each_subflow(msk, subflow) { trace_mptcp_subflow_get_send(subflow); ssk = mptcp_subflow_tcp_sock(subflow); @@ -1438,34 +1446,51 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) tout = max(tout, mptcp_timeout_from_subflow(subflow)); nr_active += !subflow->backup; - if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd) - continue; - - pace = READ_ONCE(ssk->sk_pacing_rate); - if (!pace) - continue; + pace = subflow->avg_pacing_rate; + if (unlikely(!pace)) { + /* init pacing rate from socket */ + subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); + pace = subflow->avg_pacing_rate; + if (!pace) + continue; + } - ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, - pace); - if (ratio < send_info[subflow->backup].ratio) { + linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace); + if (linger_time < send_info[subflow->backup].linger_time) { send_info[subflow->backup].ssk = ssk; - send_info[subflow->backup].ratio = ratio; + send_info[subflow->backup].linger_time = linger_time; } } __mptcp_set_timeout(sk, tout); /* pick the best backup if no other subflow is active */ if (!nr_active) - send_info[0].ssk = send_info[1].ssk; - - if 
(send_info[0].ssk) { - msk->last_snd = send_info[0].ssk; - msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE, - tcp_sk(msk->last_snd)->snd_wnd); - return msk->last_snd; - } + send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk; + + /* According to the blest algorithm, to avoid HoL blocking for the + * faster flow, we need to: + * - estimate the faster flow linger time + * - use the above to estimate the amount of byte transferred + * by the faster flow + * - check that the amount of queued data is greter than the above, + * otherwise do not use the picked, slower, subflow + * We select the subflow with the shorter estimated time to flush + * the queued mem, which basically ensure the above. We just need + * to check that subflow has a non empty cwin. + */ + ssk = send_info[SSK_MODE_ACTIVE].ssk; + if (!ssk || !sk_stream_memory_free(ssk) || !tcp_sk(ssk)->snd_wnd) + return NULL; - return NULL; + burst = min_t(int, MPTCP_SEND_BURST_SIZE, tcp_sk(ssk)->snd_wnd); + wmem = READ_ONCE(ssk->sk_wmem_queued); + subflow = mptcp_subflow_ctx(ssk); + subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + + READ_ONCE(ssk->sk_pacing_rate) * burst, + burst + wmem); + msk->last_snd = ssk; + msk->snd_burst = burst; + return ssk; } static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) @@ -1499,7 +1524,7 @@ static void mptcp_update_post_push(struct mptcp_sock *msk, msk->snd_nxt = snd_nxt_new; } -static void mptcp_check_and_set_pending(struct sock *sk) +void mptcp_check_and_set_pending(struct sock *sk) { if (mptcp_send_head(sk) && !test_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) @@ -1784,8 +1809,10 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, copied += count; if (count < data_len) { - if (!(flags & MSG_PEEK)) + if (!(flags & MSG_PEEK)) { MPTCP_SKB_CB(skb)->offset += count; + MPTCP_SKB_CB(skb)->map_seq += count; + } break; } @@ -1965,6 +1992,27 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk) return !skb_queue_empty(&msk->receive_queue); } +static unsigned int mptcp_inq_hint(const struct sock *sk) +{ + const struct mptcp_sock *msk = mptcp_sk(sk); + const struct sk_buff *skb; + + skb = skb_peek(&msk->receive_queue); + if (skb) { + u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; + + if (hint_val >= INT_MAX) + return INT_MAX; + + return (unsigned int)hint_val; + } + + if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) + return 1; + + return 0; +} + static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len) { @@ -1989,6 +2037,9 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, len = min_t(size_t, len, INT_MAX); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); + if (unlikely(msk->recvmsg_inq)) + cmsg_flags = MPTCP_CMSG_INQ; + while (copied < len) { int bytes_read; @@ -2062,6 +2113,12 @@ out_err: if (cmsg_flags && copied >= 0) { if (cmsg_flags & MPTCP_CMSG_TS) tcp_recv_timestamp(msg, sk, &tss); + + if (cmsg_flags & MPTCP_CMSG_INQ) { + unsigned int inq = mptcp_inq_hint(sk); + + put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq); + } } pr_debug("msk=%p rx queue empty=%d:%d copied=%d", @@ -3179,6 +3236,57 @@ static int mptcp_forward_alloc_get(const struct sock *sk) return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc; } +static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) +{ + const struct sock *sk = (void *)msk; + u64 delta; + + if (sk->sk_state == TCP_LISTEN) + return -EINVAL; + + if ((1 << sk->sk_state) & 
(TCPF_SYN_SENT | TCPF_SYN_RECV)) + return 0; + + delta = msk->write_seq - v; + if (delta > INT_MAX) + delta = INT_MAX; + + return (int)delta; +} + +static int mptcp_ioctl(struct sock *sk, int cmd, unsigned long arg) +{ + struct mptcp_sock *msk = mptcp_sk(sk); + bool slow; + int answ; + + switch (cmd) { + case SIOCINQ: + if (sk->sk_state == TCP_LISTEN) + return -EINVAL; + + lock_sock(sk); + __mptcp_move_skbs(msk); + answ = mptcp_inq_hint(sk); + release_sock(sk); + break; + case SIOCOUTQ: + slow = lock_sock_fast(sk); + answ = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); + unlock_sock_fast(sk, slow); + break; + case SIOCOUTQNSD: + slow = lock_sock_fast(sk); + answ = mptcp_ioctl_outq(msk, msk->snd_nxt); + unlock_sock_fast(sk, slow); + break; + default: + return -ENOIOCTLCMD; + } + + return put_user(answ, (int __user *)arg); +} + static struct proto mptcp_prot = { .name = "MPTCP", .owner = THIS_MODULE, @@ -3191,6 +3299,7 @@ static struct proto mptcp_prot = { .shutdown = mptcp_shutdown, .destroy = mptcp_destroy, .sendmsg = mptcp_sendmsg, + .ioctl = mptcp_ioctl, .recvmsg = mptcp_recvmsg, .release_cb = mptcp_release_cb, .hash = mptcp_hash, diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index d87cc040352e..0486c9f5b38b 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -249,6 +249,9 @@ struct mptcp_sock { bool rcv_fastclose; bool use_64bit_ack; /* Set when we received a 64-bit DSN */ bool csum_enabled; + u8 recvmsg_inq:1, + cork:1, + nodelay:1; spinlock_t join_list_lock; struct work_struct work; struct sk_buff *ooo_last_skb; @@ -392,6 +395,7 @@ DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions); /* MPTCP subflow context */ struct mptcp_subflow_context { struct list_head node;/* conn_list of subflows */ + unsigned long avg_pacing_rate; /* protected by msk socket lock */ u64 local_key; u64 remote_key; u64 idsn; @@ -554,6 +558,7 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net); void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow, struct mptcp_options_received *mp_opt); bool __mptcp_retransmit_pending_data(struct sock *sk); +void mptcp_check_and_set_pending(struct sock *sk); void __mptcp_push_pending(struct sock *sk, unsigned int flags); bool mptcp_subflow_data_available(struct sock *sk); void __init mptcp_subflow_init(void); diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index f8efd478ac97..aa3fcd86dbe2 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -390,6 +390,8 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, switch (optname) { case IPV6_V6ONLY: + case IPV6_TRANSPARENT: + case IPV6_FREEBIND: lock_sock(sk); ssock = __mptcp_nmpc_socket(msk); if (!ssock) { @@ -398,8 +400,24 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, } ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen); - if (ret == 0) + if (ret != 0) { + release_sock(sk); + return ret; + } + + sockopt_seq_inc(msk); + + switch (optname) { + case IPV6_V6ONLY: sk->sk_ipv6only = ssock->sk->sk_ipv6only; + break; + case IPV6_TRANSPARENT: + inet_sk(sk)->transparent = inet_sk(ssock->sk)->transparent; + break; + case IPV6_FREEBIND: + inet_sk(sk)->freebind = inet_sk(ssock->sk)->freebind; + break; + } release_sock(sk); break; @@ -538,6 +556,7 @@ static bool mptcp_supported_sockopt(int level, int optname) case TCP_TIMESTAMP: case TCP_NOTSENT_LOWAT: case TCP_TX_DELAY: + case TCP_INQ: return true; } @@ -549,7 +568,6 @@ static bool mptcp_supported_sockopt(int level, int optname) /* TCP_FASTOPEN_KEY, 
TCP_FASTOPEN TCP_FASTOPEN_CONNECT, TCP_FASTOPEN_NO_COOKIE, * are not supported fastopen is currently unsupported */ - /* TCP_INQ is currently unsupported, needs some recvmsg work */ } return false; } @@ -597,14 +615,171 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t return ret; } +static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optval, + unsigned int optlen) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = (struct sock *)msk; + int val; + + if (optlen < sizeof(int)) + return -EINVAL; + + if (copy_from_sockptr(&val, optval, sizeof(val))) + return -EFAULT; + + lock_sock(sk); + sockopt_seq_inc(msk); + msk->cork = !!val; + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + lock_sock(ssk); + __tcp_sock_set_cork(ssk, !!val); + release_sock(ssk); + } + if (!val) + mptcp_check_and_set_pending(sk); + release_sock(sk); + + return 0; +} + +static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t optval, + unsigned int optlen) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = (struct sock *)msk; + int val; + + if (optlen < sizeof(int)) + return -EINVAL; + + if (copy_from_sockptr(&val, optval, sizeof(val))) + return -EFAULT; + + lock_sock(sk); + sockopt_seq_inc(msk); + msk->nodelay = !!val; + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + lock_sock(ssk); + __tcp_sock_set_nodelay(ssk, !!val); + release_sock(ssk); + } + if (val) + mptcp_check_and_set_pending(sk); + release_sock(sk); + + return 0; +} + +static int mptcp_setsockopt_sol_ip_set_transparent(struct mptcp_sock *msk, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = (struct sock *)msk; + struct inet_sock *issk; + struct socket *ssock; + int err; + + err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen); + if (err != 0) + return err; + + lock_sock(sk); + + ssock = __mptcp_nmpc_socket(msk); + if (!ssock) { + release_sock(sk); + return -EINVAL; + } + + issk = inet_sk(ssock->sk); + + switch (optname) { + case IP_FREEBIND: + issk->freebind = inet_sk(sk)->freebind; + break; + case IP_TRANSPARENT: + issk->transparent = inet_sk(sk)->transparent; + break; + default: + release_sock(sk); + WARN_ON_ONCE(1); + return -EOPNOTSUPP; + } + + sockopt_seq_inc(msk); + release_sock(sk); + return 0; +} + +static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = (struct sock *)msk; + int err, val; + + err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen); + + if (err != 0) + return err; + + lock_sock(sk); + sockopt_seq_inc(msk); + val = inet_sk(sk)->tos; + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + __ip_sock_set_tos(ssk, val); + } + release_sock(sk); + + return err; +} + +static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname, + sockptr_t optval, unsigned int optlen) +{ + switch (optname) { + case IP_FREEBIND: + case IP_TRANSPARENT: + return mptcp_setsockopt_sol_ip_set_transparent(msk, optname, optval, optlen); + case IP_TOS: + return mptcp_setsockopt_v4_set_tos(msk, optname, optval, optlen); + } + + return -EOPNOTSUPP; +} + static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { + struct sock *sk = (void *)msk; + int ret, val; + switch (optname) { + case TCP_INQ: + ret = 
mptcp_get_int_option(msk, optval, optlen, &val); + if (ret) + return ret; + if (val < 0 || val > 1) + return -EINVAL; + + lock_sock(sk); + msk->recvmsg_inq = !!val; + release_sock(sk); + return 0; case TCP_ULP: return -EOPNOTSUPP; case TCP_CONGESTION: return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen); + case TCP_CORK: + return mptcp_setsockopt_sol_tcp_cork(msk, optval, optlen); + case TCP_NODELAY: + return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen); } return -EOPNOTSUPP; @@ -636,6 +811,9 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname, if (ssk) return tcp_setsockopt(ssk, level, optname, optval, optlen); + if (level == SOL_IP) + return mptcp_setsockopt_v4(msk, optname, optval, optlen); + if (level == SOL_IPV6) return mptcp_setsockopt_v6(msk, optname, optval, optlen); @@ -931,6 +1109,35 @@ static int mptcp_getsockopt_subflow_addrs(struct mptcp_sock *msk, char __user *o return 0; } +static int mptcp_put_int_option(struct mptcp_sock *msk, char __user *optval, + int __user *optlen, int val) +{ + int len; + + if (get_user(len, optlen)) + return -EFAULT; + if (len < 0) + return -EINVAL; + + if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) { + unsigned char ucval = (unsigned char)val; + + len = 1; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &ucval, 1)) + return -EFAULT; + } else { + len = min_t(unsigned int, len, sizeof(int)); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + } + + return 0; +} + static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { @@ -941,10 +1148,29 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname, case TCP_CC_INFO: return mptcp_getsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen); + case TCP_INQ: + return mptcp_put_int_option(msk, optval, optlen, msk->recvmsg_inq); + case TCP_CORK: + return mptcp_put_int_option(msk, optval, optlen, msk->cork); + case TCP_NODELAY: + return mptcp_put_int_option(msk, optval, optlen, msk->nodelay); } return -EOPNOTSUPP; } +static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = (void *)msk; + + switch (optname) { + case IP_TOS: + return mptcp_put_int_option(msk, optval, optlen, inet_sk(sk)->tos); + } + + return -EOPNOTSUPP; +} + static int mptcp_getsockopt_sol_mptcp(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { @@ -980,6 +1206,8 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname, if (ssk) return tcp_getsockopt(ssk, level, optname, optval, option); + if (level == SOL_IP) + return mptcp_getsockopt_v4(msk, optname, optval, option); if (level == SOL_TCP) return mptcp_getsockopt_sol_tcp(msk, optname, optval, option); if (level == SOL_MPTCP) @@ -1002,6 +1230,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk) ssk->sk_priority = sk->sk_priority; ssk->sk_bound_dev_if = sk->sk_bound_dev_if; ssk->sk_incoming_cpu = sk->sk_incoming_cpu; + __ip_sock_set_tos(ssk, inet_sk(sk)->tos); if (sk->sk_userlocks & tx_rx_locks) { ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks; @@ -1027,6 +1256,11 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk) if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops) tcp_set_congestion_control(ssk, msk->ca_name, false, true); + __tcp_sock_set_cork(ssk, !!msk->cork); + __tcp_sock_set_nodelay(ssk, !!msk->nodelay); + + 
inet_sk(ssk)->transparent = inet_sk(sk)->transparent; + inet_sk(ssk)->freebind = inet_sk(sk)->freebind; } static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 6172f380dfb7..24bc9d5e87be 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1425,6 +1425,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, if (addr.ss_family == AF_INET6) addrlen = sizeof(struct sockaddr_in6); #endif + mptcp_sockopt_sync(msk, ssk); + ssk->sk_bound_dev_if = ifindex; err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen); if (err) @@ -1441,7 +1443,6 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, mptcp_info2sockaddr(remote, &addr, ssk->sk_family); mptcp_add_pending_subflow(msk, subflow); - mptcp_sockopt_sync(msk, ssk); err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK); if (err && err != -EINPROGRESS) goto failed_unlink; @@ -1533,10 +1534,8 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock) * needs it. */ sf->sk->sk_net_refcnt = 1; - get_net(net); -#ifdef CONFIG_PROC_FS - this_cpu_add(*net->core.sock_inuse, 1); -#endif + get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL); + sock_inuse_add(net, 1); err = tcp_set_ulp(sf->sk, "mptcp"); release_sock(sf->sk); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 39c523bd775c..7f645328b47f 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -960,8 +960,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, * Create a destination for the given service */ static int -ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, - struct ip_vs_dest **dest_p) +ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) { struct ip_vs_dest *dest; unsigned int atype, i; @@ -1021,8 +1020,6 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, spin_lock_init(&dest->stats.lock); __ip_vs_update_dest(svc, dest, udest, 1); - *dest_p = dest; - LeaveFunction(2); return 0; @@ -1096,7 +1093,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) /* * Allocate and initialize the dest structure */ - ret = ip_vs_new_dest(svc, udest, &dest); + ret = ip_vs_new_dest(svc, udest); } LeaveFunction(2); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 4712a90a1820..d7e313548066 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -189,7 +189,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); unsigned int nf_conntrack_max __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_max); seqcount_spinlock_t nf_conntrack_generation __read_mostly; -static siphash_key_t nf_conntrack_hash_rnd __read_mostly; +static siphash_aligned_key_t nf_conntrack_hash_rnd; static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, unsigned int zoneid, @@ -482,7 +482,7 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); */ u32 nf_ct_get_id(const struct nf_conn *ct) { - static __read_mostly siphash_key_t ct_id_seed; + static siphash_aligned_key_t ct_id_seed; unsigned long a, b, c, d; net_get_random_once(&ct_id_seed, sizeof(ct_id_seed)); @@ -1562,9 +1562,7 @@ __nf_conntrack_alloc(struct net *net, ct->status = 0; WRITE_ONCE(ct->timeout, 0); write_pnet(&ct->ct_net, net); - memset(&ct->__nfct_init_offset, 0, - offsetof(struct nf_conn, proto) - - offsetof(struct nf_conn, 
__nfct_init_offset)); + memset_after(ct, 0, __nfct_init_offset); nf_ct_zone_add(ct, zone); @@ -2590,7 +2588,6 @@ int nf_conntrack_hash_resize(unsigned int hashsize) hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } - old_size = nf_conntrack_htable_size; old_hash = nf_conntrack_hash; nf_conntrack_hash = hash; diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index f562eeef4234..1e89b595ecd0 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_hash); unsigned int nf_ct_expect_max __read_mostly; static struct kmem_cache *nf_ct_expect_cachep __read_mostly; -static siphash_key_t nf_ct_expect_hashrnd __read_mostly; +static siphash_aligned_key_t nf_ct_expect_hashrnd; /* nf_conntrack_expect helper functions */ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index ec4164c32d27..0be2a1ae5c17 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1747,7 +1747,7 @@ restart: res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFNL_MSG_TYPE(cb->nlh->nlmsg_type), - ct, dying ? true : false, 0); + ct, dying, 0); if (res < 0) { if (!atomic_inc_not_zero(&ct->ct_general.use)) continue; @@ -2996,7 +2996,7 @@ static const union nf_inet_addr any_addr; static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp) { - static __read_mostly siphash_key_t exp_id_seed; + static siphash_aligned_key_t exp_id_seed; unsigned long a, b, c, d; net_get_random_once(&exp_id_seed, sizeof(exp_id_seed)); diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 4d50d51db796..ab9f6c75524d 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -34,7 +34,7 @@ static unsigned int nat_net_id __read_mostly; static struct hlist_head *nf_nat_bysource __read_mostly; static unsigned int nf_nat_htable_size __read_mostly; -static siphash_key_t nf_nat_hash_rnd __read_mostly; +static siphash_aligned_key_t nf_nat_hash_rnd; struct nf_nat_lookup_hook_priv { struct nf_hook_entries __rcu *entries; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index f0b9e21a2452..44c3de176d18 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -1528,15 +1528,9 @@ static void __net_exit nfnl_queue_net_exit(struct net *net) WARN_ON_ONCE(!hlist_empty(&q->instance_table[i])); } -static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list) -{ - synchronize_rcu(); -} - static struct pernet_operations nfnl_queue_net_ops = { .init = nfnl_queue_net_init, .exit = nfnl_queue_net_exit, - .exit_batch = nfnl_queue_net_exit_batch, .id = &nfnl_queue_net_id, .size = sizeof(struct nfnl_queue_net), }; diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index cd59afde5b2f..fa9301ca6033 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -27,9 +27,11 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr, { struct nft_fwd_netdev *priv = nft_expr_priv(expr); int oif = regs->data[priv->sreg_dev]; + struct sk_buff *skb = pkt->skb; /* This is used by ifb only. 
*/ - skb_set_redirected(pkt->skb, true); + skb->skb_iif = skb->dev->ifindex; + skb_set_redirected(skb, nft_hook(pkt) == NF_NETDEV_INGRESS); nf_fwd_netdev_egress(pkt, oif); regs->verdict.code = NF_STOLEN; @@ -198,7 +200,8 @@ static int nft_fwd_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) { - return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS)); + return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS) | + (1 << NF_NETDEV_EGRESS)); } static struct nft_expr_type nft_fwd_netdev_type; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 9eba2e648385..4be2d97ff93e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -707,9 +707,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, if (err < 0) goto out_module; - local_bh_disable(); sock_prot_inuse_add(net, &netlink_proto, 1); - local_bh_enable(); nlk = nlk_sk(sock->sk); nlk->module = module; @@ -809,9 +807,7 @@ static int netlink_release(struct socket *sock) netlink_table_ungrab(); } - local_bh_disable(); sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); - local_bh_enable(); call_rcu(&nlk->rcu, deferred_put_nlk_sk); return 0; } diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 8e1a88f13622..b498dac4e1e0 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -82,7 +82,7 @@ struct vport *ovs_netdev_link(struct vport *vport, const char *name) err = -ENODEV; goto error_free_vport; } - + netdev_tracker_alloc(vport->dev, &vport->dev_tracker, GFP_KERNEL); if (vport->dev->flags & IFF_LOOPBACK || (vport->dev->type != ARPHRD_ETHER && vport->dev->type != ARPHRD_NONE) || @@ -115,7 +115,7 @@ error_master_upper_dev_unlink: error_unlock: rtnl_unlock(); error_put: - dev_put(vport->dev); + dev_put_track(vport->dev, &vport->dev_tracker); error_free_vport: ovs_vport_free(vport); return ERR_PTR(err); @@ -137,8 +137,7 @@ static void vport_netdev_free(struct rcu_head *rcu) { struct vport *vport = container_of(rcu, struct vport, rcu); - if (vport->dev) - dev_put(vport->dev); + dev_put_track(vport->dev, &vport->dev_tracker); ovs_vport_free(vport); } @@ -174,7 +173,7 @@ void ovs_netdev_tunnel_destroy(struct vport *vport) */ if (vport->dev->reg_state == NETREG_REGISTERED) rtnl_delete_link(vport->dev); - dev_put(vport->dev); + dev_put_track(vport->dev, &vport->dev_tracker); vport->dev = NULL; rtnl_unlock(); diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 8a930ca6d6b1..9de5030d9801 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -58,6 +58,7 @@ struct vport_portids { /** * struct vport - one port within a datapath * @dev: Pointer to net_device. + * @dev_tracker: refcount tracker for @dev reference * @dp: Datapath to which this port belongs. * @upcall_portids: RCU protected 'struct vport_portids'. * @port_no: Index into @dp's @ports array. 
@@ -69,6 +70,7 @@ struct vport_portids { */ struct vport { struct net_device *dev; + netdevice_tracker dev_tracker; struct datapath *dp; struct vport_portids __rcu *upcall_portids; u16 port_no; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 76c2dca7f0a5..59c265b8ac26 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3102,16 +3102,14 @@ static int packet_release(struct socket *sock) sk_del_node_init_rcu(sk); mutex_unlock(&net->packet.sklist_lock); - preempt_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); - preempt_enable(); spin_lock(&po->bind_lock); unregister_prot_hook(sk, false); packet_cached_dev_reset(po); if (po->prot_hook.dev) { - dev_put(po->prot_hook.dev); + dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); @@ -3219,18 +3217,25 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, WRITE_ONCE(po->num, proto); po->prot_hook.type = proto; + dev_put_track(dev_curr, &po->prot_hook.dev_tracker); + dev_curr = NULL; + if (unlikely(unlisted)) { dev_put(dev); po->prot_hook.dev = NULL; WRITE_ONCE(po->ifindex, -1); packet_cached_dev_reset(po); } else { + if (dev) + netdev_tracker_alloc(dev, + &po->prot_hook.dev_tracker, + GFP_ATOMIC); po->prot_hook.dev = dev; WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0); packet_cached_dev_assign(po, dev); } } - dev_put(dev_curr); + dev_put_track(dev_curr, &po->prot_hook.dev_tracker); if (proto == 0 || !need_rehook) goto out_unlock; @@ -3368,9 +3373,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, sk_add_node_tail_rcu(sk, &net->packet.sklist); mutex_unlock(&net->packet.sklist_lock); - preempt_disable(); sock_prot_inuse_add(net, &packet_proto, 1); - preempt_enable(); return 0; out2: @@ -4142,7 +4145,8 @@ static int packet_notifier(struct notifier_block *this, if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); WRITE_ONCE(po->ifindex, -1); - dev_put(po->prot_hook.dev); + dev_put_track(po->prot_hook.dev, + &po->prot_hook.dev_tracker); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); diff --git a/net/rds/send.c b/net/rds/send.c index 53444397de66..0c5504068e3c 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -272,7 +272,7 @@ restart: /* Unfortunately, the way Infiniband deals with * RDMA to a bad MR key is by moving the entire - * queue pair to error state. We cold possibly + * queue pair to error state. We could possibly * recover from that, but right now we drop the * connection. * Therefore, we never retransmit messages with RDMA ops. 
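
Several conversions in this range (llc, the openvswitch vport, af_packet's prot_hook) replace bare dev_hold()/dev_put() with dev_hold_track()/dev_put_track() plus a netdevice_tracker stored next to the pointer being guarded, so a leaked or mismatched release can be pinned to the holder that took the reference. The C program below is not the kernel API; it is a deliberately simplified, userspace-only model of the same take-with-cookie / release-with-the-same-cookie discipline, with invented names (struct obj, obj_hold_track(), obj_put_track()) used purely for illustration.

/* Toy model of paired, tracked references: every hold registers a
 * tracker cookie, every put must hand the same cookie back, so an
 * unbalanced or mismatched release is reported with its owner.
 * All names here are illustrative only.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct tracker {
	const char *owner;	/* who took the reference */
	bool active;
};

struct obj {
	int refcnt;
};

static void obj_hold_track(struct obj *o, struct tracker *t, const char *owner)
{
	o->refcnt++;
	t->owner = owner;
	t->active = true;
}

static void obj_put_track(struct obj *o, struct tracker *t)
{
	if (!t->active) {
		fprintf(stderr, "double/unmatched put by %s\n",
			t->owner ? t->owner : "unknown");
		return;
	}
	t->active = false;
	o->refcnt--;
}

static void obj_check_leaks(const struct obj *o,
			    const struct tracker *trackers, int n)
{
	for (int i = 0; i < n; i++)
		if (trackers[i].active)
			fprintf(stderr, "leaked reference held by %s\n",
				trackers[i].owner);
	assert(o->refcnt >= 0);
}

int main(void)
{
	struct obj dev = { .refcnt = 1 };	/* initial reference */
	struct tracker vport_tracker = { 0 }, sock_tracker = { 0 };
	struct tracker trackers[2];

	obj_hold_track(&dev, &vport_tracker, "vport");
	obj_hold_track(&dev, &sock_tracker, "af_packet bind");

	obj_put_track(&dev, &vport_tracker);
	/* forgetting obj_put_track(&dev, &sock_tracker) is exactly the
	 * kind of leak the tracker exists to attribute:
	 */
	trackers[0] = vport_tracker;
	trackers[1] = sock_tracker;
	obj_check_leaks(&dev, trackers, 2);

	printf("refcnt = %d\n", dev.refcnt);
	return 0;
}

In the hunks above the cookie is the tracker field embedded in the owning object (vport->dev_tracker, po->prot_hook.dev_tracker, llc->dev_tracker), which is what lets dev_put_track() be matched against the earlier dev_hold_track() or netdev_tracker_alloc() on the same structure.
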
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index ac15a944573f..5b1927d66f0d 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -946,6 +946,18 @@ bool rfkill_blocked(struct rfkill *rfkill) } EXPORT_SYMBOL(rfkill_blocked); +bool rfkill_soft_blocked(struct rfkill *rfkill) +{ + unsigned long flags; + u32 state; + + spin_lock_irqsave(&rfkill->lock, flags); + state = rfkill->state; + spin_unlock_irqrestore(&rfkill->lock, flags); + + return !!(state & RFKILL_BLOCK_SW); +} +EXPORT_SYMBOL(rfkill_soft_blocked); struct rfkill * __must_check rfkill_alloc(const char *name, struct device *parent, diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 3258da3d5bed..32563cef85bf 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -19,8 +19,10 @@ #include <net/sock.h> #include <net/sch_generic.h> #include <net/pkt_cls.h> +#include <net/tc_act/tc_pedit.h> #include <net/act_api.h> #include <net/netlink.h> +#include <net/flow_offload.h> #ifdef CONFIG_INET DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count); @@ -129,8 +131,244 @@ static void free_tcf(struct tc_action *p) kfree(p); } +static void offload_action_hw_count_set(struct tc_action *act, + u32 hw_count) +{ + act->in_hw_count = hw_count; +} + +static void offload_action_hw_count_inc(struct tc_action *act, + u32 hw_count) +{ + act->in_hw_count += hw_count; +} + +static void offload_action_hw_count_dec(struct tc_action *act, + u32 hw_count) +{ + act->in_hw_count = act->in_hw_count > hw_count ? + act->in_hw_count - hw_count : 0; +} + +static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act) +{ + if (is_tcf_pedit(act)) + return tcf_pedit_nkeys(act); + else + return 1; +} + +static bool tc_act_skip_hw(u32 flags) +{ + return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false; +} + +static bool tc_act_skip_sw(u32 flags) +{ + return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false; +} + +static bool tc_act_in_hw(struct tc_action *act) +{ + return !!act->in_hw_count; +} + +/* SKIP_HW and SKIP_SW are mutually exclusive flags. */ +static bool tc_act_flags_valid(u32 flags) +{ + flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW; + + return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW); +} + +static int offload_action_init(struct flow_offload_action *fl_action, + struct tc_action *act, + enum offload_act_command cmd, + struct netlink_ext_ack *extack) +{ + int err; + + fl_action->extack = extack; + fl_action->command = cmd; + fl_action->index = act->tcfa_index; + + if (act->ops->offload_act_setup) { + spin_lock_bh(&act->tcfa_lock); + err = act->ops->offload_act_setup(act, fl_action, NULL, + false); + spin_unlock_bh(&act->tcfa_lock); + return err; + } + + return -EOPNOTSUPP; +} + +static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act, + u32 *hw_count) +{ + int err; + + err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT, + fl_act, NULL, NULL); + if (err < 0) + return err; + + if (hw_count) + *hw_count = err; + + return 0; +} + +static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act, + u32 *hw_count, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + int err; + + err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL); + if (err < 0) + return err; + + if (hw_count) + *hw_count = 1; + + return 0; +} + +static int tcf_action_offload_cmd(struct flow_offload_action *fl_act, + u32 *hw_count, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + return cb ? 
tcf_action_offload_cmd_cb_ex(fl_act, hw_count, + cb, cb_priv) : + tcf_action_offload_cmd_ex(fl_act, hw_count); +} + +static int tcf_action_offload_add_ex(struct tc_action *action, + struct netlink_ext_ack *extack, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + bool skip_sw = tc_act_skip_sw(action->tcfa_flags); + struct tc_action *actions[TCA_ACT_MAX_PRIO] = { + [0] = action, + }; + struct flow_offload_action *fl_action; + u32 in_hw_count = 0; + int num, err = 0; + + if (tc_act_skip_hw(action->tcfa_flags)) + return 0; + + num = tcf_offload_act_num_actions_single(action); + fl_action = offload_action_alloc(num); + if (!fl_action) + return -ENOMEM; + + err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack); + if (err) + goto fl_err; + + err = tc_setup_action(&fl_action->action, actions); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to setup tc actions for offload\n"); + goto fl_err; + } + + err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv); + if (!err) + cb ? offload_action_hw_count_inc(action, in_hw_count) : + offload_action_hw_count_set(action, in_hw_count); + + if (skip_sw && !tc_act_in_hw(action)) + err = -EINVAL; + + tc_cleanup_offload_action(&fl_action->action); + +fl_err: + kfree(fl_action); + + return err; +} + +/* offload the tc action after it is inserted */ +static int tcf_action_offload_add(struct tc_action *action, + struct netlink_ext_ack *extack) +{ + return tcf_action_offload_add_ex(action, extack, NULL, NULL); +} + +int tcf_action_update_hw_stats(struct tc_action *action) +{ + struct flow_offload_action fl_act = {}; + int err; + + if (!tc_act_in_hw(action)) + return -EOPNOTSUPP; + + err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL); + if (err) + return err; + + err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL); + if (!err) { + preempt_disable(); + tcf_action_stats_update(action, fl_act.stats.bytes, + fl_act.stats.pkts, + fl_act.stats.drops, + fl_act.stats.lastused, + true); + preempt_enable(); + action->used_hw_stats = fl_act.stats.used_hw_stats; + action->used_hw_stats_valid = true; + } else { + return -EOPNOTSUPP; + } + + return 0; +} +EXPORT_SYMBOL(tcf_action_update_hw_stats); + +static int tcf_action_offload_del_ex(struct tc_action *action, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + struct flow_offload_action fl_act = {}; + u32 in_hw_count = 0; + int err = 0; + + if (!tc_act_in_hw(action)) + return 0; + + err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL); + if (err) + return err; + + err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv); + if (err < 0) + return err; + + if (!cb && action->in_hw_count != in_hw_count) + return -EINVAL; + + /* do not need to update hw state when deleting action */ + if (cb && in_hw_count) + offload_action_hw_count_dec(action, in_hw_count); + + return 0; +} + +static int tcf_action_offload_del(struct tc_action *action) +{ + return tcf_action_offload_del_ex(action, NULL, NULL); +} + static void tcf_action_cleanup(struct tc_action *p) { + tcf_action_offload_del(p); if (p->ops->cleanup) p->ops->cleanup(p); @@ -497,7 +735,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, p->tcfa_tm.install = jiffies; p->tcfa_tm.lastuse = jiffies; p->tcfa_tm.firstuse = 0; - p->tcfa_flags = flags & TCA_ACT_FLAGS_USER_MASK; + p->tcfa_flags = flags; if (est) { err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats, &p->tcfa_rate_est, @@ -622,6 +860,59 @@ EXPORT_SYMBOL(tcf_idrinfo_destroy); static LIST_HEAD(act_base); static 
DEFINE_RWLOCK(act_mod_lock); +/* since act ops id is stored in pernet subsystem list, + * then there is no way to walk through only all the action + * subsystem, so we keep tc action pernet ops id for + * reoffload to walk through. + */ +static LIST_HEAD(act_pernet_id_list); +static DEFINE_MUTEX(act_id_mutex); +struct tc_act_pernet_id { + struct list_head list; + unsigned int id; +}; + +static int tcf_pernet_add_id_list(unsigned int id) +{ + struct tc_act_pernet_id *id_ptr; + int ret = 0; + + mutex_lock(&act_id_mutex); + list_for_each_entry(id_ptr, &act_pernet_id_list, list) { + if (id_ptr->id == id) { + ret = -EEXIST; + goto err_out; + } + } + + id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL); + if (!id_ptr) { + ret = -ENOMEM; + goto err_out; + } + id_ptr->id = id; + + list_add_tail(&id_ptr->list, &act_pernet_id_list); + +err_out: + mutex_unlock(&act_id_mutex); + return ret; +} + +static void tcf_pernet_del_id_list(unsigned int id) +{ + struct tc_act_pernet_id *id_ptr; + + mutex_lock(&act_id_mutex); + list_for_each_entry(id_ptr, &act_pernet_id_list, list) { + if (id_ptr->id == id) { + list_del(&id_ptr->list); + kfree(id_ptr); + break; + } + } + mutex_unlock(&act_id_mutex); +} int tcf_register_action(struct tc_action_ops *act, struct pernet_operations *ops) @@ -640,18 +931,31 @@ int tcf_register_action(struct tc_action_ops *act, if (ret) return ret; + if (ops->id) { + ret = tcf_pernet_add_id_list(*ops->id); + if (ret) + goto err_id; + } + write_lock(&act_mod_lock); list_for_each_entry(a, &act_base, head) { if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) { - write_unlock(&act_mod_lock); - unregister_pernet_subsys(ops); - return -EEXIST; + ret = -EEXIST; + goto err_out; } } list_add_tail(&act->head, &act_base); write_unlock(&act_mod_lock); return 0; + +err_out: + write_unlock(&act_mod_lock); + if (ops->id) + tcf_pernet_del_id_list(*ops->id); +err_id: + unregister_pernet_subsys(ops); + return ret; } EXPORT_SYMBOL(tcf_register_action); @@ -670,8 +974,11 @@ int tcf_unregister_action(struct tc_action_ops *act, } } write_unlock(&act_mod_lock); - if (!err) + if (!err) { unregister_pernet_subsys(ops); + if (ops->id) + tcf_pernet_del_id_list(*ops->id); + } return err; } EXPORT_SYMBOL(tcf_unregister_action); @@ -735,6 +1042,9 @@ restart_act_graph: jmp_prgcnt -= 1; continue; } + + if (tc_act_skip_sw(a->tcfa_flags)) + continue; repeat: ret = a->ops->act(skb, a, res); if (ret == TC_ACT_REPEAT) @@ -821,6 +1131,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) int err = -EINVAL; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; + u32 flags; if (tcf_action_dump_terse(skb, a, false)) goto nla_put_failure; @@ -835,9 +1146,13 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) a->used_hw_stats, TCA_ACT_HW_STATS_ANY)) goto nla_put_failure; - if (a->tcfa_flags && + flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK; + if (flags && nla_put_bitfield32(skb, TCA_ACT_FLAGS, - a->tcfa_flags, a->tcfa_flags)) + flags, flags)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count)) goto nla_put_failure; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); @@ -919,7 +1234,9 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = { [TCA_ACT_COOKIE] = { .type = NLA_BINARY, .len = TC_COOKIE_MAX_SIZE }, [TCA_ACT_OPTIONS] = { .type = NLA_NESTED }, - [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS), + [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS | + 
TCA_ACT_FLAGS_SKIP_HW | + TCA_ACT_FLAGS_SKIP_SW), [TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY), }; @@ -1032,8 +1349,13 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, } } hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]); - if (tb[TCA_ACT_FLAGS]) + if (tb[TCA_ACT_FLAGS]) { userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]); + if (!tc_act_flags_valid(userflags.value)) { + err = -EINVAL; + goto err_out; + } + } err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp, userflags.value | flags, extack); @@ -1061,11 +1383,17 @@ err_out: return ERR_PTR(err); } +static bool tc_act_bind(u32 flags) +{ + return !!(flags & TCA_ACT_FLAGS_BIND); +} + /* Returns numbers of initialized actions or negative error. */ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, struct tc_action *actions[], - int init_res[], size_t *attr_size, u32 flags, + int init_res[], size_t *attr_size, + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {}; @@ -1103,6 +1431,22 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, sz += tcf_action_fill_size(act); /* Start from index 0 */ actions[i - 1] = act; + if (tc_act_bind(flags)) { + bool skip_sw = tc_skip_sw(fl_flags); + bool skip_hw = tc_skip_hw(fl_flags); + + if (tc_act_bind(act->tcfa_flags)) + continue; + if (skip_sw != tc_act_skip_sw(act->tcfa_flags) || + skip_hw != tc_act_skip_hw(act->tcfa_flags)) { + err = -EINVAL; + goto err; + } + } else { + err = tcf_action_offload_add(act, extack); + if (tc_act_skip_sw(act->tcfa_flags) && err) + goto err; + } } /* We have to commit them all together, because if any error happened in @@ -1154,6 +1498,9 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p, if (p == NULL) goto errout; + /* update hw stats for this action */ + tcf_action_update_hw_stats(p); + /* compat_mode being true specifies a call that is supposed * to add additional backward compatibility statistic TLVs. */ @@ -1396,6 +1743,96 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) } static int +tcf_reoffload_del_notify(struct net *net, struct tc_action *action) +{ + size_t attr_size = tcf_action_fill_size(action); + struct tc_action *actions[TCA_ACT_MAX_PRIO] = { + [0] = action, + }; + const struct tc_action_ops *ops = action->ops; + struct sk_buff *skb; + int ret; + + skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? 
NLMSG_GOODSIZE : attr_size, + GFP_KERNEL); + if (!skb) + return -ENOBUFS; + + if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) { + kfree_skb(skb); + return -EINVAL; + } + + ret = tcf_idr_release_unsafe(action); + if (ret == ACT_P_DELETED) { + module_put(ops->owner); + ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0); + } else { + kfree_skb(skb); + } + + return ret; +} + +int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb, + void *cb_priv, bool add) +{ + struct tc_act_pernet_id *id_ptr; + struct tcf_idrinfo *idrinfo; + struct tc_action_net *tn; + struct tc_action *p; + unsigned int act_id; + unsigned long tmp; + unsigned long id; + struct idr *idr; + struct net *net; + int ret; + + if (!cb) + return -EINVAL; + + down_read(&net_rwsem); + mutex_lock(&act_id_mutex); + + for_each_net(net) { + list_for_each_entry(id_ptr, &act_pernet_id_list, list) { + act_id = id_ptr->id; + tn = net_generic(net, act_id); + if (!tn) + continue; + idrinfo = tn->idrinfo; + if (!idrinfo) + continue; + + mutex_lock(&idrinfo->lock); + idr = &idrinfo->action_idr; + idr_for_each_entry_ul(idr, p, tmp, id) { + if (IS_ERR(p) || tc_act_bind(p->tcfa_flags)) + continue; + if (add) { + tcf_action_offload_add_ex(p, NULL, cb, + cb_priv); + continue; + } + + /* cb unregister to update hw count */ + ret = tcf_action_offload_del_ex(p, cb, cb_priv); + if (ret < 0) + continue; + if (tc_act_skip_sw(p->tcfa_flags) && + !tc_act_in_hw(p)) + tcf_reoffload_del_notify(net, p); + } + mutex_unlock(&idrinfo->lock); + } + } + mutex_unlock(&act_id_mutex); + up_read(&net_rwsem); + + return 0; +} + +static int tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], u32 portid, size_t attr_size, struct netlink_ext_ack *extack) { @@ -1508,7 +1945,7 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, for (loop = 0; loop < 10; loop++) { ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res, - &attr_size, flags, extack); + &attr_size, flags, 0, extack); if (ret != -EAGAIN) break; } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index f2bf896331a5..a77d8908e737 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -305,7 +305,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, ret = tcf_idr_check_alloc(tn, &index, act, bind); if (!ret) { ret = tcf_idr_create(tn, index, est, act, - &act_bpf_ops, bind, true, 0); + &act_bpf_ops, bind, true, flags); if (ret < 0) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 94e78ac7a748..09e2aafc8943 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -124,7 +124,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, ret = tcf_idr_check_alloc(tn, &index, a, bind); if (!ret) { ret = tcf_idr_create(tn, index, est, a, - &act_connmark_ops, bind, false, 0); + &act_connmark_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index a15ec95e69c3..e0f515b774ca 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -695,6 +695,24 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_csum)); } +static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_CSUM; + entry->csum_flags = tcf_csum_update_flags(act); + *index_inc = 1; + } else { + struct 
flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_CSUM; + } + + return 0; +} + static struct tc_action_ops act_csum_ops = { .kind = "csum", .id = TCA_ID_CSUM, @@ -706,6 +724,7 @@ static struct tc_action_ops act_csum_ops = { .walk = tcf_csum_walker, .lookup = tcf_csum_search, .get_fill_size = tcf_csum_get_fill_size, + .offload_act_setup = tcf_csum_offload_act_setup, .size = sizeof(struct tcf_csum), }; diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index ab3591408419..f9afb5abff21 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -393,7 +393,8 @@ static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft, { bool tcp = false; - if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) + if ((ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) || + !test_bit(IPS_ASSURED_BIT, &ct->status)) return; switch (nf_ct_protonum(ct)) { @@ -1493,6 +1494,26 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse); } +static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_CT; + entry->ct.action = tcf_ct_action(act); + entry->ct.zone = tcf_ct_zone(act); + entry->ct.flow_table = tcf_ct_ft(act); + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_CT; + } + + return 0; +} + static struct tc_action_ops act_ct_ops = { .kind = "ct", .id = TCA_ID_CT, @@ -1504,6 +1525,7 @@ static struct tc_action_ops act_ct_ops = { .walk = tcf_ct_walker, .lookup = tcf_ct_search, .stats_update = tcf_stats_update, + .offload_act_setup = tcf_ct_offload_act_setup, .size = sizeof(struct tcf_ct), }; diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index 549374a2d008..0281e45987a4 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -212,7 +212,7 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_ctinfo_ops, bind, false, 0); + &act_ctinfo_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index d8dce173df37..bde6a6c01e64 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -252,6 +252,43 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act) return sz; } +static int tcf_gact_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_gact_ok(act)) { + entry->id = FLOW_ACTION_ACCEPT; + } else if (is_tcf_gact_shot(act)) { + entry->id = FLOW_ACTION_DROP; + } else if (is_tcf_gact_trap(act)) { + entry->id = FLOW_ACTION_TRAP; + } else if (is_tcf_gact_goto_chain(act)) { + entry->id = FLOW_ACTION_GOTO; + entry->chain_index = tcf_gact_goto_chain_index(act); + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_gact_ok(act)) + fl_action->id = FLOW_ACTION_ACCEPT; + else if (is_tcf_gact_shot(act)) + fl_action->id = FLOW_ACTION_DROP; + else if (is_tcf_gact_trap(act)) + fl_action->id = FLOW_ACTION_TRAP; + else if (is_tcf_gact_goto_chain(act)) + fl_action->id = FLOW_ACTION_GOTO; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct 
tc_action_ops act_gact_ops = { .kind = "gact", .id = TCA_ID_GACT, @@ -263,6 +300,7 @@ static struct tc_action_ops act_gact_ops = { .walk = tcf_gact_walker, .lookup = tcf_gact_search, .get_fill_size = tcf_gact_get_fill_size, + .offload_act_setup = tcf_gact_offload_act_setup, .size = sizeof(struct tcf_gact), }; diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c index 7df72a4197a3..d56e73843a4b 100644 --- a/net/sched/act_gate.c +++ b/net/sched/act_gate.c @@ -357,7 +357,7 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_gate_ops, bind, false, 0); + &act_gate_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -597,6 +597,54 @@ static size_t tcf_gate_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_gate)); } +static void tcf_gate_entry_destructor(void *priv) +{ + struct action_gate_entry *oe = priv; + + kfree(oe); +} + +static int tcf_gate_get_entries(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->gate.entries = tcf_gate_get_list(act); + + if (!entry->gate.entries) + return -EINVAL; + + entry->destructor = tcf_gate_entry_destructor; + entry->destructor_priv = entry->gate.entries; + + return 0; +} + +static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + int err; + + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_GATE; + entry->gate.prio = tcf_gate_prio(act); + entry->gate.basetime = tcf_gate_basetime(act); + entry->gate.cycletime = tcf_gate_cycletime(act); + entry->gate.cycletimeext = tcf_gate_cycletimeext(act); + entry->gate.num_entries = tcf_gate_num_entries(act); + err = tcf_gate_get_entries(entry, act); + if (err) + return err; + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_GATE; + } + + return 0; +} + static struct tc_action_ops act_gate_ops = { .kind = "gate", .id = TCA_ID_GATE, @@ -609,6 +657,7 @@ static struct tc_action_ops act_gate_ops = { .stats_update = tcf_gate_stats_update, .get_fill_size = tcf_gate_get_fill_size, .lookup = tcf_gate_search, + .offload_act_setup = tcf_gate_offload_act_setup, .size = sizeof(struct tcf_gate), }; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index b757f90a2d58..41ba55e60b1b 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -553,7 +553,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, &act_ife_ops, - bind, true, 0); + bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); kfree(p); diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 265b1443e252..2f3d507c24a1 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -145,7 +145,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, ops, bind, - false, 0); + false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index efc963ab995a..39acd1d18609 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -79,7 +79,7 @@ static void tcf_mirred_release(struct tc_action *a) /* last reference to action, no need to lock */ dev = rcu_dereference_protected(m->tcfm_dev, 1); - dev_put(dev); + dev_put_track(dev, &m->tcfm_dev_tracker); } static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = { @@ -101,7 
+101,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, bool mac_header_xmit = false; struct tc_mirred *parm; struct tcf_mirred *m; - struct net_device *dev; bool exists = false; int ret, err; u32 index; @@ -171,16 +170,19 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, spin_lock_bh(&m->tcf_lock); if (parm->ifindex) { - dev = dev_get_by_index(net, parm->ifindex); - if (!dev) { + struct net_device *odev, *ndev; + + ndev = dev_get_by_index(net, parm->ifindex); + if (!ndev) { spin_unlock_bh(&m->tcf_lock); err = -ENODEV; goto put_chain; } - mac_header_xmit = dev_is_mac_header_xmit(dev); - dev = rcu_replace_pointer(m->tcfm_dev, dev, + mac_header_xmit = dev_is_mac_header_xmit(ndev); + odev = rcu_replace_pointer(m->tcfm_dev, ndev, lockdep_is_held(&m->tcf_lock)); - dev_put(dev); + dev_put_track(odev, &m->tcfm_dev_tracker); + netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC); m->tcfm_mac_header_xmit = mac_header_xmit; } goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); @@ -400,7 +402,7 @@ static int mirred_device_event(struct notifier_block *unused, list_for_each_entry(m, &mirred_list, tcfm_list) { spin_lock_bh(&m->tcf_lock); if (tcf_mirred_dev_dereference(m) == dev) { - dev_put(dev); + dev_put_track(dev, &m->tcfm_dev_tracker); /* Note : no rcu grace period necessary, as * net_device are already rcu protected. */ @@ -448,6 +450,55 @@ static size_t tcf_mirred_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_mirred)); } +static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->dev = act->ops->get_dev(act, &entry->destructor); + if (!entry->dev) + return; + entry->destructor_priv = entry->dev; +} + +static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_mirred_egress_redirect(act)) { + entry->id = FLOW_ACTION_REDIRECT; + tcf_offload_mirred_get_dev(entry, act); + } else if (is_tcf_mirred_egress_mirror(act)) { + entry->id = FLOW_ACTION_MIRRED; + tcf_offload_mirred_get_dev(entry, act); + } else if (is_tcf_mirred_ingress_redirect(act)) { + entry->id = FLOW_ACTION_REDIRECT_INGRESS; + tcf_offload_mirred_get_dev(entry, act); + } else if (is_tcf_mirred_ingress_mirror(act)) { + entry->id = FLOW_ACTION_MIRRED_INGRESS; + tcf_offload_mirred_get_dev(entry, act); + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_mirred_egress_redirect(act)) + fl_action->id = FLOW_ACTION_REDIRECT; + else if (is_tcf_mirred_egress_mirror(act)) + fl_action->id = FLOW_ACTION_MIRRED; + else if (is_tcf_mirred_ingress_redirect(act)) + fl_action->id = FLOW_ACTION_REDIRECT_INGRESS; + else if (is_tcf_mirred_ingress_mirror(act)) + fl_action->id = FLOW_ACTION_MIRRED_INGRESS; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_mirred_ops = { .kind = "mirred", .id = TCA_ID_MIRRED, @@ -460,6 +511,7 @@ static struct tc_action_ops act_mirred_ops = { .walk = tcf_mirred_walker, .lookup = tcf_mirred_search, .get_fill_size = tcf_mirred_get_fill_size, + .offload_act_setup = tcf_mirred_offload_act_setup, .size = sizeof(struct tcf_mirred), .get_dev = tcf_mirred_get_dev, }; diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index 8faa4c58305e..b9ff3459fdab 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -248,7 +248,7 @@ static int 
tcf_mpls_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_mpls_ops, bind, true, 0); + &act_mpls_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -384,6 +384,57 @@ static int tcf_mpls_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + switch (tcf_mpls_action(act)) { + case TCA_MPLS_ACT_PUSH: + entry->id = FLOW_ACTION_MPLS_PUSH; + entry->mpls_push.proto = tcf_mpls_proto(act); + entry->mpls_push.label = tcf_mpls_label(act); + entry->mpls_push.tc = tcf_mpls_tc(act); + entry->mpls_push.bos = tcf_mpls_bos(act); + entry->mpls_push.ttl = tcf_mpls_ttl(act); + break; + case TCA_MPLS_ACT_POP: + entry->id = FLOW_ACTION_MPLS_POP; + entry->mpls_pop.proto = tcf_mpls_proto(act); + break; + case TCA_MPLS_ACT_MODIFY: + entry->id = FLOW_ACTION_MPLS_MANGLE; + entry->mpls_mangle.label = tcf_mpls_label(act); + entry->mpls_mangle.tc = tcf_mpls_tc(act); + entry->mpls_mangle.bos = tcf_mpls_bos(act); + entry->mpls_mangle.ttl = tcf_mpls_ttl(act); + break; + default: + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + switch (tcf_mpls_action(act)) { + case TCA_MPLS_ACT_PUSH: + fl_action->id = FLOW_ACTION_MPLS_PUSH; + break; + case TCA_MPLS_ACT_POP: + fl_action->id = FLOW_ACTION_MPLS_POP; + break; + case TCA_MPLS_ACT_MODIFY: + fl_action->id = FLOW_ACTION_MPLS_MANGLE; + break; + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + static struct tc_action_ops act_mpls_ops = { .kind = "mpls", .id = TCA_ID_MPLS, @@ -394,6 +445,7 @@ static struct tc_action_ops act_mpls_ops = { .cleanup = tcf_mpls_cleanup, .walk = tcf_mpls_walker, .lookup = tcf_mpls_search, + .offload_act_setup = tcf_mpls_offload_act_setup, .size = sizeof(struct tcf_mpls), }; diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 7dd6b586ba7f..2a39b3729e84 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -61,7 +61,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_nat_ops, bind, false, 0); + &act_nat_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index c6c862c459cc..31fcd279c177 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -189,7 +189,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_pedit_ops, bind, false, 0); + &act_pedit_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); goto out_free; @@ -487,6 +487,39 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + int k; + + for (k = 0; k < tcf_pedit_nkeys(act); k++) { + switch (tcf_pedit_cmd(act, k)) { + case TCA_PEDIT_KEY_EX_CMD_SET: + entry->id = FLOW_ACTION_MANGLE; + break; + case TCA_PEDIT_KEY_EX_CMD_ADD: + entry->id = FLOW_ACTION_ADD; + break; + default: + return -EOPNOTSUPP; + } + 
entry->mangle.htype = tcf_pedit_htype(act, k); + entry->mangle.mask = tcf_pedit_mask(act, k); + entry->mangle.val = tcf_pedit_val(act, k); + entry->mangle.offset = tcf_pedit_offset(act, k); + entry->hw_stats = tc_act_hw_stats(act->hw_stats); + entry++; + } + *index_inc = k; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_pedit_ops = { .kind = "pedit", .id = TCA_ID_PEDIT, @@ -498,6 +531,7 @@ static struct tc_action_ops act_pedit_ops = { .init = tcf_pedit_init, .walk = tcf_pedit_walker, .lookup = tcf_pedit_search, + .offload_act_setup = tcf_pedit_offload_act_setup, .size = sizeof(struct tcf_pedit), }; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 9e77ba8401e5..0923aa2b8f8a 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -90,7 +90,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, NULL, a, - &act_police_ops, bind, true, 0); + &act_police_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -405,6 +405,30 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_POLICE; + entry->police.burst = tcf_police_burst(act); + entry->police.rate_bytes_ps = + tcf_police_rate_bytes_ps(act); + entry->police.burst_pkt = tcf_police_burst_pkt(act); + entry->police.rate_pkt_ps = + tcf_police_rate_pkt_ps(act); + entry->police.mtu = tcf_police_tcfp_mtu(act); + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_POLICE; + } + + return 0; +} + MODULE_AUTHOR("Alexey Kuznetsov"); MODULE_DESCRIPTION("Policing actions"); MODULE_LICENSE("GPL"); @@ -420,6 +444,7 @@ static struct tc_action_ops act_police_ops = { .walk = tcf_police_walker, .lookup = tcf_police_search, .cleanup = tcf_police_cleanup, + .offload_act_setup = tcf_police_offload_act_setup, .size = sizeof(struct tcf_police), }; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index ce859b0e0deb..9a22cdda6bbd 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -70,7 +70,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_sample_ops, bind, true, 0); + &act_sample_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -282,6 +282,35 @@ tcf_sample_get_group(const struct tc_action *a, return group; } +static void tcf_offload_sample_get_group(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->sample.psample_group = + act->ops->get_psample_group(act, &entry->destructor); + entry->destructor_priv = entry->sample.psample_group; +} + +static int tcf_sample_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_SAMPLE; + entry->sample.trunc_size = tcf_sample_trunc_size(act); + entry->sample.truncate = tcf_sample_truncate(act); + entry->sample.rate = tcf_sample_rate(act); + tcf_offload_sample_get_group(entry, act); + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_SAMPLE; + } + + return 0; +} + static struct tc_action_ops act_sample_ops = { 
.kind = "sample", .id = TCA_ID_SAMPLE, @@ -294,6 +323,7 @@ static struct tc_action_ops act_sample_ops = { .walk = tcf_sample_walker, .lookup = tcf_sample_search, .get_psample_group = tcf_sample_get_group, + .offload_act_setup = tcf_sample_offload_act_setup, .size = sizeof(struct tcf_sample), }; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index e617ab4505ca..8c1d60bde93e 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -129,7 +129,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_simp_ops, bind, false, 0); + &act_simp_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index d30ecbfc8f84..ceba11b198bb 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -176,7 +176,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_skbedit_ops, bind, true, 0); + &act_skbedit_ops, bind, true, act_flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -327,6 +327,41 @@ static size_t tcf_skbedit_get_fill_size(const struct tc_action *act) + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */ } +static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_skbedit_mark(act)) { + entry->id = FLOW_ACTION_MARK; + entry->mark = tcf_skbedit_mark(act); + } else if (is_tcf_skbedit_ptype(act)) { + entry->id = FLOW_ACTION_PTYPE; + entry->ptype = tcf_skbedit_ptype(act); + } else if (is_tcf_skbedit_priority(act)) { + entry->id = FLOW_ACTION_PRIORITY; + entry->priority = tcf_skbedit_priority(act); + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_skbedit_mark(act)) + fl_action->id = FLOW_ACTION_MARK; + else if (is_tcf_skbedit_ptype(act)) + fl_action->id = FLOW_ACTION_PTYPE; + else if (is_tcf_skbedit_priority(act)) + fl_action->id = FLOW_ACTION_PRIORITY; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_skbedit_ops = { .kind = "skbedit", .id = TCA_ID_SKBEDIT, @@ -339,6 +374,7 @@ static struct tc_action_ops act_skbedit_ops = { .walk = tcf_skbedit_walker, .get_fill_size = tcf_skbedit_get_fill_size, .lookup = tcf_skbedit_search, + .offload_act_setup = tcf_skbedit_offload_act_setup, .size = sizeof(struct tcf_skbedit), }; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 9b6b52c5e24e..2083612d8780 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -168,7 +168,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_skbmod_ops, bind, true, 0); + &act_skbmod_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index d9cd174eecb7..23aba03d26a8 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -787,6 +787,59 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static void tcf_tunnel_encap_put_tunnel(void *priv) +{ + struct ip_tunnel_info *tunnel = priv; + + kfree(tunnel); +} + +static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, + const struct tc_action *act) +{ + 
entry->tunnel = tcf_tunnel_info_copy(act); + if (!entry->tunnel) + return -ENOMEM; + entry->destructor = tcf_tunnel_encap_put_tunnel; + entry->destructor_priv = entry->tunnel; + return 0; +} + +static int tcf_tunnel_key_offload_act_setup(struct tc_action *act, + void *entry_data, + u32 *index_inc, + bool bind) +{ + int err; + + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_tunnel_set(act)) { + entry->id = FLOW_ACTION_TUNNEL_ENCAP; + err = tcf_tunnel_encap_get_tunnel(entry, act); + if (err) + return err; + } else if (is_tcf_tunnel_release(act)) { + entry->id = FLOW_ACTION_TUNNEL_DECAP; + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_tunnel_set(act)) + fl_action->id = FLOW_ACTION_TUNNEL_ENCAP; + else if (is_tcf_tunnel_release(act)) + fl_action->id = FLOW_ACTION_TUNNEL_DECAP; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_tunnel_key_ops = { .kind = "tunnel_key", .id = TCA_ID_TUNNEL_KEY, @@ -797,6 +850,7 @@ static struct tc_action_ops act_tunnel_key_ops = { .cleanup = tunnel_key_release, .walk = tunnel_key_walker, .lookup = tunnel_key_search, + .offload_act_setup = tcf_tunnel_key_offload_act_setup, .size = sizeof(struct tcf_tunnel_key), }; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index e4dc5a555bd8..756e2dcde1cd 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -368,6 +368,53 @@ static size_t tcf_vlan_get_fill_size(const struct tc_action *act) + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */ } +static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + switch (tcf_vlan_action(act)) { + case TCA_VLAN_ACT_PUSH: + entry->id = FLOW_ACTION_VLAN_PUSH; + entry->vlan.vid = tcf_vlan_push_vid(act); + entry->vlan.proto = tcf_vlan_push_proto(act); + entry->vlan.prio = tcf_vlan_push_prio(act); + break; + case TCA_VLAN_ACT_POP: + entry->id = FLOW_ACTION_VLAN_POP; + break; + case TCA_VLAN_ACT_MODIFY: + entry->id = FLOW_ACTION_VLAN_MANGLE; + entry->vlan.vid = tcf_vlan_push_vid(act); + entry->vlan.proto = tcf_vlan_push_proto(act); + entry->vlan.prio = tcf_vlan_push_prio(act); + break; + default: + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + switch (tcf_vlan_action(act)) { + case TCA_VLAN_ACT_PUSH: + fl_action->id = FLOW_ACTION_VLAN_PUSH; + break; + case TCA_VLAN_ACT_POP: + fl_action->id = FLOW_ACTION_VLAN_POP; + break; + case TCA_VLAN_ACT_MODIFY: + fl_action->id = FLOW_ACTION_VLAN_MANGLE; + break; + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + static struct tc_action_ops act_vlan_ops = { .kind = "vlan", .id = TCA_ID_VLAN, @@ -380,6 +427,7 @@ static struct tc_action_ops act_vlan_ops = { .stats_update = tcf_vlan_stats_update, .get_fill_size = tcf_vlan_get_fill_size, .lookup = tcf_vlan_search, + .offload_act_setup = tcf_vlan_offload_act_setup, .size = sizeof(struct tcf_vlan), }; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 35c74bdde848..a53c72e6d944 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3028,9 +3028,9 @@ void tcf_exts_destroy(struct tcf_exts *exts) } EXPORT_SYMBOL(tcf_exts_destroy); -int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, - struct nlattr *rate_tlv, struct tcf_exts *exts, - u32 flags, struct netlink_ext_ack *extack) +int 
tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb, + struct nlattr *rate_tlv, struct tcf_exts *exts, + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { #ifdef CONFIG_NET_CLS_ACT { @@ -3064,7 +3064,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, flags |= TCA_ACT_FLAGS_BIND; err = tcf_action_init(net, tp, tb[exts->action], rate_tlv, exts->actions, init_res, - &attr_size, flags, extack); + &attr_size, flags, fl_flags, + extack); if (err < 0) return err; exts->nr_actions = err; @@ -3080,6 +3081,15 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, return 0; } +EXPORT_SYMBOL(tcf_exts_validate_ex); + +int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, + struct nlattr *rate_tlv, struct tcf_exts *exts, + u32 flags, struct netlink_ext_ack *extack) +{ + return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts, + flags, 0, extack); +} EXPORT_SYMBOL(tcf_exts_validate); void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) @@ -3323,7 +3333,7 @@ err_unlock: up_read(&block->cb_lock); if (take_rtnl) rtnl_unlock(); - return ok_count < 0 ? ok_count : 0; + return min(ok_count, 0); } EXPORT_SYMBOL(tc_setup_cb_add); @@ -3379,7 +3389,7 @@ err_unlock: up_read(&block->cb_lock); if (take_rtnl) rtnl_unlock(); - return ok_count < 0 ? ok_count : 0; + return min(ok_count, 0); } EXPORT_SYMBOL(tc_setup_cb_replace); @@ -3417,7 +3427,7 @@ retry: up_read(&block->cb_lock); if (take_rtnl) rtnl_unlock(); - return ok_count < 0 ? ok_count : 0; + return min(ok_count, 0); } EXPORT_SYMBOL(tc_setup_cb_destroy); @@ -3464,7 +3474,7 @@ static void tcf_act_put_cookie(struct flow_action_entry *entry) flow_action_cookie_destroy(entry->cookie); } -void tc_cleanup_flow_action(struct flow_action *flow_action) +void tc_cleanup_offload_action(struct flow_action *flow_action) { struct flow_action_entry *entry; int i; @@ -3475,93 +3485,37 @@ void tc_cleanup_flow_action(struct flow_action *flow_action) entry->destructor(entry->destructor_priv); } } -EXPORT_SYMBOL(tc_cleanup_flow_action); +EXPORT_SYMBOL(tc_cleanup_offload_action); -static void tcf_mirred_get_dev(struct flow_action_entry *entry, - const struct tc_action *act) +static int tc_setup_offload_act(struct tc_action *act, + struct flow_action_entry *entry, + u32 *index_inc) { #ifdef CONFIG_NET_CLS_ACT - entry->dev = act->ops->get_dev(act, &entry->destructor); - if (!entry->dev) - return; - entry->destructor_priv = entry->dev; -#endif -} - -static void tcf_tunnel_encap_put_tunnel(void *priv) -{ - struct ip_tunnel_info *tunnel = priv; - - kfree(tunnel); -} - -static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, - const struct tc_action *act) -{ - entry->tunnel = tcf_tunnel_info_copy(act); - if (!entry->tunnel) - return -ENOMEM; - entry->destructor = tcf_tunnel_encap_put_tunnel; - entry->destructor_priv = entry->tunnel; + if (act->ops->offload_act_setup) + return act->ops->offload_act_setup(act, entry, index_inc, true); + else + return -EOPNOTSUPP; +#else return 0; -} - -static void tcf_sample_get_group(struct flow_action_entry *entry, - const struct tc_action *act) -{ -#ifdef CONFIG_NET_CLS_ACT - entry->sample.psample_group = - act->ops->get_psample_group(act, &entry->destructor); - entry->destructor_priv = entry->sample.psample_group; #endif } -static void tcf_gate_entry_destructor(void *priv) -{ - struct action_gate_entry *oe = priv; - - kfree(oe); -} - -static int tcf_gate_get_entries(struct flow_action_entry *entry, - 
const struct tc_action *act) -{ - entry->gate.entries = tcf_gate_get_list(act); - - if (!entry->gate.entries) - return -EINVAL; - - entry->destructor = tcf_gate_entry_destructor; - entry->destructor_priv = entry->gate.entries; - - return 0; -} - -static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats) -{ - if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY)) - return FLOW_ACTION_HW_STATS_DONT_CARE; - else if (!hw_stats) - return FLOW_ACTION_HW_STATS_DISABLED; - - return hw_stats; -} - -int tc_setup_flow_action(struct flow_action *flow_action, - const struct tcf_exts *exts) +int tc_setup_action(struct flow_action *flow_action, + struct tc_action *actions[]) { + int i, j, index, err = 0; struct tc_action *act; - int i, j, k, err = 0; BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); - if (!exts) + if (!actions) return 0; j = 0; - tcf_exts_for_each_action(i, act, exts) { + tcf_act_for_each_action(i, act, actions) { struct flow_action_entry *entry; entry = &flow_action->entries[j]; @@ -3571,165 +3525,39 @@ int tc_setup_flow_action(struct flow_action *flow_action, goto err_out_locked; entry->hw_stats = tc_act_hw_stats(act->hw_stats); - - if (is_tcf_gact_ok(act)) { - entry->id = FLOW_ACTION_ACCEPT; - } else if (is_tcf_gact_shot(act)) { - entry->id = FLOW_ACTION_DROP; - } else if (is_tcf_gact_trap(act)) { - entry->id = FLOW_ACTION_TRAP; - } else if (is_tcf_gact_goto_chain(act)) { - entry->id = FLOW_ACTION_GOTO; - entry->chain_index = tcf_gact_goto_chain_index(act); - } else if (is_tcf_mirred_egress_redirect(act)) { - entry->id = FLOW_ACTION_REDIRECT; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_mirred_egress_mirror(act)) { - entry->id = FLOW_ACTION_MIRRED; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_mirred_ingress_redirect(act)) { - entry->id = FLOW_ACTION_REDIRECT_INGRESS; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_mirred_ingress_mirror(act)) { - entry->id = FLOW_ACTION_MIRRED_INGRESS; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_vlan(act)) { - switch (tcf_vlan_action(act)) { - case TCA_VLAN_ACT_PUSH: - entry->id = FLOW_ACTION_VLAN_PUSH; - entry->vlan.vid = tcf_vlan_push_vid(act); - entry->vlan.proto = tcf_vlan_push_proto(act); - entry->vlan.prio = tcf_vlan_push_prio(act); - break; - case TCA_VLAN_ACT_POP: - entry->id = FLOW_ACTION_VLAN_POP; - break; - case TCA_VLAN_ACT_MODIFY: - entry->id = FLOW_ACTION_VLAN_MANGLE; - entry->vlan.vid = tcf_vlan_push_vid(act); - entry->vlan.proto = tcf_vlan_push_proto(act); - entry->vlan.prio = tcf_vlan_push_prio(act); - break; - default: - err = -EOPNOTSUPP; - goto err_out_locked; - } - } else if (is_tcf_tunnel_set(act)) { - entry->id = FLOW_ACTION_TUNNEL_ENCAP; - err = tcf_tunnel_encap_get_tunnel(entry, act); - if (err) - goto err_out_locked; - } else if (is_tcf_tunnel_release(act)) { - entry->id = FLOW_ACTION_TUNNEL_DECAP; - } else if (is_tcf_pedit(act)) { - for (k = 0; k < tcf_pedit_nkeys(act); k++) { - switch (tcf_pedit_cmd(act, k)) { - case TCA_PEDIT_KEY_EX_CMD_SET: - entry->id = FLOW_ACTION_MANGLE; - break; - case TCA_PEDIT_KEY_EX_CMD_ADD: - entry->id = FLOW_ACTION_ADD; - break; - default: - err = -EOPNOTSUPP; - goto err_out_locked; - } - entry->mangle.htype = tcf_pedit_htype(act, k); - entry->mangle.mask = tcf_pedit_mask(act, k); - entry->mangle.val = tcf_pedit_val(act, k); - entry->mangle.offset = tcf_pedit_offset(act, k); - entry->hw_stats = 
tc_act_hw_stats(act->hw_stats); - entry = &flow_action->entries[++j]; - } - } else if (is_tcf_csum(act)) { - entry->id = FLOW_ACTION_CSUM; - entry->csum_flags = tcf_csum_update_flags(act); - } else if (is_tcf_skbedit_mark(act)) { - entry->id = FLOW_ACTION_MARK; - entry->mark = tcf_skbedit_mark(act); - } else if (is_tcf_sample(act)) { - entry->id = FLOW_ACTION_SAMPLE; - entry->sample.trunc_size = tcf_sample_trunc_size(act); - entry->sample.truncate = tcf_sample_truncate(act); - entry->sample.rate = tcf_sample_rate(act); - tcf_sample_get_group(entry, act); - } else if (is_tcf_police(act)) { - entry->id = FLOW_ACTION_POLICE; - entry->police.burst = tcf_police_burst(act); - entry->police.rate_bytes_ps = - tcf_police_rate_bytes_ps(act); - entry->police.burst_pkt = tcf_police_burst_pkt(act); - entry->police.rate_pkt_ps = - tcf_police_rate_pkt_ps(act); - entry->police.mtu = tcf_police_tcfp_mtu(act); - entry->police.index = act->tcfa_index; - } else if (is_tcf_ct(act)) { - entry->id = FLOW_ACTION_CT; - entry->ct.action = tcf_ct_action(act); - entry->ct.zone = tcf_ct_zone(act); - entry->ct.flow_table = tcf_ct_ft(act); - } else if (is_tcf_mpls(act)) { - switch (tcf_mpls_action(act)) { - case TCA_MPLS_ACT_PUSH: - entry->id = FLOW_ACTION_MPLS_PUSH; - entry->mpls_push.proto = tcf_mpls_proto(act); - entry->mpls_push.label = tcf_mpls_label(act); - entry->mpls_push.tc = tcf_mpls_tc(act); - entry->mpls_push.bos = tcf_mpls_bos(act); - entry->mpls_push.ttl = tcf_mpls_ttl(act); - break; - case TCA_MPLS_ACT_POP: - entry->id = FLOW_ACTION_MPLS_POP; - entry->mpls_pop.proto = tcf_mpls_proto(act); - break; - case TCA_MPLS_ACT_MODIFY: - entry->id = FLOW_ACTION_MPLS_MANGLE; - entry->mpls_mangle.label = tcf_mpls_label(act); - entry->mpls_mangle.tc = tcf_mpls_tc(act); - entry->mpls_mangle.bos = tcf_mpls_bos(act); - entry->mpls_mangle.ttl = tcf_mpls_ttl(act); - break; - default: - err = -EOPNOTSUPP; - goto err_out_locked; - } - } else if (is_tcf_skbedit_ptype(act)) { - entry->id = FLOW_ACTION_PTYPE; - entry->ptype = tcf_skbedit_ptype(act); - } else if (is_tcf_skbedit_priority(act)) { - entry->id = FLOW_ACTION_PRIORITY; - entry->priority = tcf_skbedit_priority(act); - } else if (is_tcf_gate(act)) { - entry->id = FLOW_ACTION_GATE; - entry->gate.index = tcf_gate_index(act); - entry->gate.prio = tcf_gate_prio(act); - entry->gate.basetime = tcf_gate_basetime(act); - entry->gate.cycletime = tcf_gate_cycletime(act); - entry->gate.cycletimeext = tcf_gate_cycletimeext(act); - entry->gate.num_entries = tcf_gate_num_entries(act); - err = tcf_gate_get_entries(entry, act); - if (err) - goto err_out_locked; - } else { - err = -EOPNOTSUPP; + entry->hw_index = act->tcfa_index; + index = 0; + err = tc_setup_offload_act(act, entry, &index); + if (!err) + j += index; + else goto err_out_locked; - } spin_unlock_bh(&act->tcfa_lock); - - if (!is_tcf_pedit(act)) - j++; } err_out: if (err) - tc_cleanup_flow_action(flow_action); + tc_cleanup_offload_action(flow_action); return err; err_out_locked: spin_unlock_bh(&act->tcfa_lock); goto err_out; } -EXPORT_SYMBOL(tc_setup_flow_action); + +int tc_setup_offload_action(struct flow_action *flow_action, + const struct tcf_exts *exts) +{ +#ifdef CONFIG_NET_CLS_ACT + if (!exts) + return 0; + + return tc_setup_action(flow_action, exts->actions); +#else + return 0; +#endif +} +EXPORT_SYMBOL(tc_setup_offload_action); unsigned int tcf_exts_num_actions(struct tcf_exts *exts) { diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index ef54ed395874..1a9b1f140f9e 100644 --- a/net/sched/cls_flower.c +++ 
b/net/sched/cls_flower.c @@ -463,7 +463,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, cls_flower.rule->match.key = &f->mkey; cls_flower.classid = f->res.classid; - err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); + err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts); if (err) { kfree(cls_flower.rule); if (skip_sw) { @@ -475,7 +475,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw, &f->flags, &f->in_hw_count, rtnl_held); - tc_cleanup_flow_action(&cls_flower.rule->action); + tc_cleanup_offload_action(&cls_flower.rule->action); kfree(cls_flower.rule); if (err) { @@ -503,12 +503,12 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, rtnl_held); - tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes, - cls_flower.stats.pkts, - cls_flower.stats.drops, - cls_flower.stats.lastused, - cls_flower.stats.used_hw_stats, - cls_flower.stats.used_hw_stats_valid); + tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes, + cls_flower.stats.pkts, + cls_flower.stats.drops, + cls_flower.stats.lastused, + cls_flower.stats.used_hw_stats, + cls_flower.stats.used_hw_stats_valid); } static void __fl_put(struct cls_fl_filter *f) @@ -1919,12 +1919,14 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, struct cls_fl_filter *f, struct fl_flow_mask *mask, unsigned long base, struct nlattr **tb, struct nlattr *est, - struct fl_flow_tmplt *tmplt, u32 flags, + struct fl_flow_tmplt *tmplt, + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { int err; - err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack); + err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags, + fl_flags, extack); if (err < 0) return err; @@ -2038,7 +2040,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, } err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], - tp->chain->tmplt_priv, flags, extack); + tp->chain->tmplt_priv, flags, fnew->flags, + extack); if (err) goto errout; @@ -2268,7 +2271,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, cls_flower.rule->match.mask = &f->mask->key; cls_flower.rule->match.key = &f->mkey; - err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); + err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts); if (err) { kfree(cls_flower.rule); if (tc_skip_sw(f->flags)) { @@ -2285,7 +2288,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, TC_SETUP_CLSFLOWER, &cls_flower, cb_priv, &f->flags, &f->in_hw_count); - tc_cleanup_flow_action(&cls_flower.rule->action); + tc_cleanup_offload_action(&cls_flower.rule->action); kfree(cls_flower.rule); if (err) { diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 24f0046ce0b3..ca5670fd5228 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -97,7 +97,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, cls_mall.command = TC_CLSMATCHALL_REPLACE; cls_mall.cookie = cookie; - err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts); + err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts); if (err) { kfree(cls_mall.rule); mall_destroy_hw_filter(tp, head, cookie, NULL); @@ -111,7 +111,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw, &head->flags, &head->in_hw_count, true); - 
tc_cleanup_flow_action(&cls_mall.rule->action); + tc_cleanup_offload_action(&cls_mall.rule->action); kfree(cls_mall.rule); if (err) { @@ -163,12 +163,13 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { static int mall_set_parms(struct net *net, struct tcf_proto *tp, struct cls_mall_head *head, unsigned long base, struct nlattr **tb, - struct nlattr *est, u32 flags, + struct nlattr *est, u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { int err; - err = tcf_exts_validate(net, tp, tb, est, &head->exts, flags, extack); + err = tcf_exts_validate_ex(net, tp, tb, est, &head->exts, flags, + fl_flags, extack); if (err < 0) return err; @@ -226,8 +227,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, goto err_alloc_percpu; } - err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], flags, - extack); + err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], + flags, new->flags, extack); if (err) goto err_set_parms; @@ -301,7 +302,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY; cls_mall.cookie = (unsigned long)head; - err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts); + err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts); if (err) { kfree(cls_mall.rule); if (add && tc_skip_sw(head->flags)) { @@ -314,7 +315,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv, &head->flags, &head->in_hw_count); - tc_cleanup_flow_action(&cls_mall.rule->action); + tc_cleanup_offload_action(&cls_mall.rule->action); kfree(cls_mall.rule); if (err) @@ -336,11 +337,11 @@ static void mall_stats_hw_filter(struct tcf_proto *tp, tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true); - tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes, - cls_mall.stats.pkts, cls_mall.stats.drops, - cls_mall.stats.lastused, - cls_mall.stats.used_hw_stats, - cls_mall.stats.used_hw_stats_valid); + tcf_exts_hw_stats_update(&head->exts, cls_mall.stats.bytes, + cls_mall.stats.pkts, cls_mall.stats.drops, + cls_mall.stats.lastused, + cls_mall.stats.used_hw_stats, + cls_mall.stats.used_hw_stats_valid); } static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh, diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 4272814487f0..cf5649292ee0 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -709,12 +709,13 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { static int u32_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct tc_u_knode *n, struct nlattr **tb, - struct nlattr *est, u32 flags, + struct nlattr *est, u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { int err; - err = tcf_exts_validate(net, tp, tb, est, &n->exts, flags, extack); + err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags, + fl_flags, extack); if (err < 0) return err; @@ -895,7 +896,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -ENOMEM; err = u32_set_parms(net, tp, base, new, tb, - tca[TCA_RATE], flags, extack); + tca[TCA_RATE], flags, new->flags, + extack); if (err) { u32_destroy_key(new, false); @@ -1060,8 +1062,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, } #endif - err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], flags, - extack); + err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], + flags, n->flags, extack); if (err == 0) { 
struct tc_u_knode __rcu **ins; struct tc_u_knode *pins; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 3b0f62095803..b07bd1c7330f 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -434,9 +434,9 @@ unsigned long dev_trans_start(struct net_device *dev) dev = vlan_dev_real_dev(dev); else if (netif_is_macvlan(dev)) dev = macvlan_dev_real_dev(dev); - res = netdev_get_tx_queue(dev, 0)->trans_start; + res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start); for (i = 1; i < dev->num_tx_queues; i++) { - val = netdev_get_tx_queue(dev, i)->trans_start; + val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start); if (val && time_after(val, res)) res = val; } @@ -445,11 +445,63 @@ unsigned long dev_trans_start(struct net_device *dev) } EXPORT_SYMBOL(dev_trans_start); +static void netif_freeze_queues(struct net_device *dev) +{ + unsigned int i; + int cpu; + + cpu = smp_processor_id(); + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* We are the only thread of execution doing a + * freeze, but we have to grab the _xmit_lock in + * order to synchronize with threads which are in + * the ->hard_start_xmit() handler and already + * checked the frozen bit. + */ + __netif_tx_lock(txq, cpu); + set_bit(__QUEUE_STATE_FROZEN, &txq->state); + __netif_tx_unlock(txq); + } +} + +void netif_tx_lock(struct net_device *dev) +{ + spin_lock(&dev->tx_global_lock); + netif_freeze_queues(dev); +} +EXPORT_SYMBOL(netif_tx_lock); + +static void netif_unfreeze_queues(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* No need to grab the _xmit_lock here. If the + * queue is not stopped for another reason, we + * force a schedule. 
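The netif_freeze_queues() comment in this hunk explains why the watchdog must briefly take each queue's _xmit_lock even though it is the only freezer: any transmitter that sampled the frozen bit before it was set must have already left ->hard_start_xmit() once the lock has been cycled. A minimal userspace sketch of that handoff, assuming pthreads as a stand-in for the per-queue transmit lock (all names below are hypothetical, not kernel API):

    /* Sketch only: setting a "frozen" flag under the same lock the data
     * path holds while transmitting guarantees no transmitter is still
     * running with a stale view of the flag once freeze_queue() returns.
     */
    #include <pthread.h>
    #include <stdbool.h>

    struct txq {
        pthread_mutex_t xmit_lock;   /* stands in for _xmit_lock */
        bool frozen;                 /* stands in for __QUEUE_STATE_FROZEN */
    };

    /* Data path: transmit only while holding the lock and not frozen. */
    static void xmit_one(struct txq *q)
    {
        pthread_mutex_lock(&q->xmit_lock);
        if (!q->frozen) {
            /* hand the packet to hardware here */
        }
        pthread_mutex_unlock(&q->xmit_lock);
    }

    /* Control path: after this returns, no transmitter can still be in
     * the critical section above having observed frozen == false.
     */
    static void freeze_queue(struct txq *q)
    {
        pthread_mutex_lock(&q->xmit_lock);
        q->frozen = true;
        pthread_mutex_unlock(&q->xmit_lock);
    }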
+ */ + clear_bit(__QUEUE_STATE_FROZEN, &txq->state); + netif_schedule_queue(txq); + } +} + +void netif_tx_unlock(struct net_device *dev) +{ + netif_unfreeze_queues(dev); + spin_unlock(&dev->tx_global_lock); +} +EXPORT_SYMBOL(netif_tx_unlock); + static void dev_watchdog(struct timer_list *t) { struct net_device *dev = from_timer(dev, t, watchdog_timer); + bool release = true; - netif_tx_lock(dev); + spin_lock(&dev->tx_global_lock); if (!qdisc_tx_is_noop(dev)) { if (netif_device_present(dev) && netif_running(dev) && @@ -462,31 +514,34 @@ static void dev_watchdog(struct timer_list *t) struct netdev_queue *txq; txq = netdev_get_tx_queue(dev, i); - trans_start = txq->trans_start; + trans_start = READ_ONCE(txq->trans_start); if (netif_xmit_stopped(txq) && time_after(jiffies, (trans_start + dev->watchdog_timeo))) { some_queue_timedout = 1; - txq->trans_timeout++; + atomic_long_inc(&txq->trans_timeout); break; } } - if (some_queue_timedout) { + if (unlikely(some_queue_timedout)) { trace_net_dev_xmit_timeout(dev, i); WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n", dev->name, netdev_drivername(dev), i); + netif_freeze_queues(dev); dev->netdev_ops->ndo_tx_timeout(dev, i); + netif_unfreeze_queues(dev); } if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo))) - dev_hold(dev); + release = false; } } - netif_tx_unlock(dev); + spin_unlock(&dev->tx_global_lock); - dev_put(dev); + if (release) + dev_put_track(dev, &dev->watchdog_dev_tracker); } void __netdev_watchdog_up(struct net_device *dev) @@ -496,7 +551,7 @@ void __netdev_watchdog_up(struct net_device *dev) dev->watchdog_timeo = 5*HZ; if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo))) - dev_hold(dev); + dev_hold_track(dev, &dev->watchdog_dev_tracker, GFP_ATOMIC); } } EXPORT_SYMBOL_GPL(__netdev_watchdog_up); @@ -510,7 +565,7 @@ static void dev_watchdog_down(struct net_device *dev) { netif_tx_lock_bh(dev); if (del_timer(&dev->watchdog_timer)) - dev_put(dev); + dev_put_track(dev, &dev->watchdog_dev_tracker); netif_tx_unlock_bh(dev); } @@ -920,7 +975,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, sch->enqueue = ops->enqueue; sch->dequeue = ops->dequeue; sch->dev_queue = dev_queue; - dev_hold(dev); + dev_hold_track(dev, &sch->dev_tracker, GFP_KERNEL); refcount_set(&sch->refcnt, 1); return sch; @@ -1020,7 +1075,7 @@ static void qdisc_destroy(struct Qdisc *qdisc) ops->destroy(qdisc); module_put(ops->owner); - dev_put(qdisc_dev(qdisc)); + dev_put_track(qdisc_dev(qdisc), &qdisc->dev_tracker); trace_qdisc_destroy(qdisc); @@ -1148,7 +1203,7 @@ static void transition_one_qdisc(struct net_device *dev, rcu_assign_pointer(dev_queue->qdisc, new_qdisc); if (need_watchdog_p) { - dev_queue->trans_start = 0; + WRITE_ONCE(dev_queue->trans_start, 0); *need_watchdog_p = 1; } } diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index ecbb10db1111..ed4ccef5d6a8 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -208,17 +208,17 @@ static bool loss_4state(struct netem_sched_data *q) * next state and if the next packet has to be transmitted or lost. 
* The four states correspond to: * TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period - * LOST_IN_BURST_PERIOD => isolated losses within a gap period - * LOST_IN_GAP_PERIOD => lost packets within a burst period - * TX_IN_GAP_PERIOD => successfully transmitted packets within a burst period + * LOST_IN_GAP_PERIOD => isolated losses within a gap period + * LOST_IN_BURST_PERIOD => lost packets within a burst period + * TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period */ switch (clg->state) { case TX_IN_GAP_PERIOD: if (rnd < clg->a4) { - clg->state = LOST_IN_BURST_PERIOD; + clg->state = LOST_IN_GAP_PERIOD; return true; } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { - clg->state = LOST_IN_GAP_PERIOD; + clg->state = LOST_IN_BURST_PERIOD; return true; } else if (clg->a1 + clg->a4 < rnd) { clg->state = TX_IN_GAP_PERIOD; @@ -227,24 +227,24 @@ static bool loss_4state(struct netem_sched_data *q) break; case TX_IN_BURST_PERIOD: if (rnd < clg->a5) { - clg->state = LOST_IN_GAP_PERIOD; + clg->state = LOST_IN_BURST_PERIOD; return true; } else { clg->state = TX_IN_BURST_PERIOD; } break; - case LOST_IN_GAP_PERIOD: + case LOST_IN_BURST_PERIOD: if (rnd < clg->a3) clg->state = TX_IN_BURST_PERIOD; else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { clg->state = TX_IN_GAP_PERIOD; } else if (clg->a2 + clg->a3 < rnd) { - clg->state = LOST_IN_GAP_PERIOD; + clg->state = LOST_IN_BURST_PERIOD; return true; } break; - case LOST_IN_BURST_PERIOD: + case LOST_IN_GAP_PERIOD: clg->state = TX_IN_GAP_PERIOD; break; } diff --git a/net/sctp/input.c b/net/sctp/input.c index 1f1786021d9c..90e12bafdd48 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -746,23 +746,21 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep) struct sock *sk = ep->base.sk; struct net *net = sock_net(sk); struct sctp_hashbucket *head; - struct sctp_ep_common *epb; - epb = &ep->base; - epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port); - head = &sctp_ep_hashtable[epb->hashent]; + ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port); + head = &sctp_ep_hashtable[ep->hashent]; if (sk->sk_reuseport) { bool any = sctp_is_ep_boundall(sk); - struct sctp_ep_common *epb2; + struct sctp_endpoint *ep2; struct list_head *list; int cnt = 0, err = 1; list_for_each(list, &ep->base.bind_addr.address_list) cnt++; - sctp_for_each_hentry(epb2, &head->chain) { - struct sock *sk2 = epb2->sk; + sctp_for_each_hentry(ep2, &head->chain) { + struct sock *sk2 = ep2->base.sk; if (!net_eq(sock_net(sk2), net) || sk2 == sk || !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) || @@ -789,7 +787,7 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep) } write_lock(&head->lock); - hlist_add_head(&epb->node, &head->chain); + hlist_add_head(&ep->node, &head->chain); write_unlock(&head->lock); return 0; } @@ -811,19 +809,16 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep) { struct sock *sk = ep->base.sk; struct sctp_hashbucket *head; - struct sctp_ep_common *epb; - epb = &ep->base; + ep->hashent = sctp_ep_hashfn(sock_net(sk), ep->base.bind_addr.port); - epb->hashent = sctp_ep_hashfn(sock_net(sk), epb->bind_addr.port); - - head = &sctp_ep_hashtable[epb->hashent]; + head = &sctp_ep_hashtable[ep->hashent]; if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_detach_sock(sk); write_lock(&head->lock); - hlist_del_init(&epb->node); + hlist_del_init(&ep->node); write_unlock(&head->lock); } @@ -856,7 +851,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint( const union sctp_addr *paddr) { struct 
sctp_hashbucket *head; - struct sctp_ep_common *epb; struct sctp_endpoint *ep; struct sock *sk; __be16 lport; @@ -866,8 +860,7 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint( hash = sctp_ep_hashfn(net, ntohs(lport)); head = &sctp_ep_hashtable[hash]; read_lock(&head->lock); - sctp_for_each_hentry(epb, &head->chain) { - ep = sctp_ep(epb); + sctp_for_each_hentry(ep, &head->chain) { if (sctp_endpoint_is_match(ep, net, laddr)) goto hit; } diff --git a/net/sctp/output.c b/net/sctp/output.c index cdfdbd353c67..72fe6669c50d 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -134,7 +134,7 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, dst_hold(tp->dst); sk_setup_caps(sk, tp->dst); } - packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size + packet->max_size = sk_can_gso(sk) ? READ_ONCE(tp->dst->dev->gso_max_size) : asoc->pathmtu; rcu_read_unlock(); } diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index ff47091c385e..a18609f608fb 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -547,6 +547,9 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, sctp_assoc_update_retran_path(transport->asoc); transport->asoc->rtx_data_chunks += transport->asoc->unack_data; + if (transport->pl.state == SCTP_PL_COMPLETE && + transport->asoc->unack_data) + sctp_transport_reset_probe_timer(transport); break; case SCTP_RTXR_FAST_RTX: SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS); diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 982a87b3e11f..f13d6a34f32f 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -161,7 +161,6 @@ static void *sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos) static int sctp_eps_seq_show(struct seq_file *seq, void *v) { struct sctp_hashbucket *head; - struct sctp_ep_common *epb; struct sctp_endpoint *ep; struct sock *sk; int hash = *(loff_t *)v; @@ -171,18 +170,17 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) head = &sctp_ep_hashtable[hash]; read_lock_bh(&head->lock); - sctp_for_each_hentry(epb, &head->chain) { - ep = sctp_ep(epb); - sk = epb->sk; + sctp_for_each_hentry(ep, &head->chain) { + sk = ep->base.sk; if (!net_eq(sock_net(sk), seq_file_net(seq))) continue; seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk, sctp_sk(sk)->type, sk->sk_state, hash, - epb->bind_addr.port, + ep->base.bind_addr.port, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), sock_i_ino(sk)); - sctp_seq_dump_local_addrs(seq, epb); + sctp_seq_dump_local_addrs(seq, &ep->base); seq_printf(seq, "\n"); } read_unlock_bh(&head->lock); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 354c1c4de19b..cc544a97c4af 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1124,12 +1124,11 @@ enum sctp_disposition sctp_sf_send_probe(struct net *net, if (!sctp_transport_pl_enabled(transport)) return SCTP_DISPOSITION_CONSUME; - if (sctp_transport_pl_send(transport)) { - reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size); - if (!reply) - return SCTP_DISPOSITION_NOMEM; - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); - } + sctp_transport_pl_send(transport); + reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size); + if (!reply) + return SCTP_DISPOSITION_NOMEM; + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE, SCTP_TRANSPORT(transport)); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 33391254fa82..eed3b87f51f4 100644 --- 
a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5068,12 +5068,9 @@ static int sctp_init_sock(struct sock *sk) SCTP_DBG_OBJCNT_INC(sock); - local_bh_disable(); sk_sockets_allocated_inc(sk); sock_prot_inuse_add(net, sk->sk_prot, 1); - local_bh_enable(); - return 0; } @@ -5099,10 +5096,8 @@ static void sctp_destroy_sock(struct sock *sk) list_del(&sp->auto_asconf_list); } sctp_endpoint_free(sp->ep); - local_bh_disable(); sk_sockets_allocated_dec(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - local_bh_enable(); } /* Triggered when there are no references on the socket anymore */ @@ -5299,14 +5294,14 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p) { int err = 0; int hash = 0; - struct sctp_ep_common *epb; + struct sctp_endpoint *ep; struct sctp_hashbucket *head; for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; hash++, head++) { read_lock_bh(&head->lock); - sctp_for_each_hentry(epb, &head->chain) { - err = cb(sctp_ep(epb), p); + sctp_for_each_hentry(ep, &head->chain) { + err = cb(ep, p); if (err) break; } diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 133f1719bf1b..f8fd98784977 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -213,13 +213,18 @@ void sctp_transport_reset_reconf_timer(struct sctp_transport *transport) void sctp_transport_reset_probe_timer(struct sctp_transport *transport) { - if (timer_pending(&transport->probe_timer)) - return; if (!mod_timer(&transport->probe_timer, jiffies + transport->probe_interval)) sctp_transport_hold(transport); } +void sctp_transport_reset_raise_timer(struct sctp_transport *transport) +{ + if (!mod_timer(&transport->probe_timer, + jiffies + transport->probe_interval * 30)) + sctp_transport_hold(transport); +} + /* This transport has been assigned to an association. * Initialize fields from the association or from the sock itself. * Register the reference count in the association. 
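The sctp_transport_reset_probe_timer()/sctp_transport_reset_raise_timer() hunks above lean on mod_timer()'s return value: it returns 0 when the timer was not already pending, so exactly one transport reference is taken per armed timer and dropped again when the timer fires or is cancelled. A hedged userspace model of that invariant (hypothetical names; a boolean stands in for timer_pending()):

    #include <assert.h>
    #include <stdbool.h>

    struct transport {
        int refcnt;          /* stands in for the transport refcount */
        bool timer_pending;  /* stands in for timer_pending(&probe_timer) */
    };

    /* Re-arm: like mod_timer(), re-arming an already-pending timer must
     * not take another reference, or references would leak on every rearm.
     */
    static void reset_probe_timer(struct transport *t)
    {
        bool was_pending = t->timer_pending;

        t->timer_pending = true;   /* (re)schedule the expiry */
        if (!was_pending)
            t->refcnt++;           /* one reference per pending timer */
    }

    /* Expiry (or a successful del_timer) drops that single reference. */
    static void probe_timer_expired(struct transport *t)
    {
        assert(t->timer_pending);
        t->timer_pending = false;
        t->refcnt--;
    }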
@@ -258,12 +263,11 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) sctp_transport_pl_update(transport); } -bool sctp_transport_pl_send(struct sctp_transport *t) +void sctp_transport_pl_send(struct sctp_transport *t) { if (t->pl.probe_count < SCTP_MAX_PROBES) goto out; - t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks; t->pl.probe_count = 0; if (t->pl.state == SCTP_PL_BASE) { if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */ @@ -298,17 +302,9 @@ bool sctp_transport_pl_send(struct sctp_transport *t) } out: - if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 && - !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) { - t->pl.raise_count++; - return false; - } - pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); - t->pl.probe_count++; - return true; } bool sctp_transport_pl_recv(struct sctp_transport *t) @@ -316,7 +312,6 @@ bool sctp_transport_pl_recv(struct sctp_transport *t) pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); - t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks; t->pl.pmtu = t->pl.probe_size; t->pl.probe_count = 0; if (t->pl.state == SCTP_PL_BASE) { @@ -338,14 +333,14 @@ bool sctp_transport_pl_recv(struct sctp_transport *t) t->pl.probe_size += SCTP_PL_MIN_STEP; if (t->pl.probe_size >= t->pl.probe_high) { t->pl.probe_high = 0; - t->pl.raise_count = 0; t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */ t->pl.probe_size = t->pl.pmtu; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_assoc_sync_pmtu(t->asoc); + sctp_transport_reset_raise_timer(t); } - } else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) { + } else if (t->pl.state == SCTP_PL_COMPLETE) { /* Raise probe_size again after 30 * interval in Search Complete */ t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */ t->pl.probe_size += SCTP_PL_MIN_STEP; @@ -393,6 +388,7 @@ static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu) t->pl.probe_high = 0; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + sctp_transport_reset_probe_timer(t); return true; } } diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 1c9289f56dc4..ba9d1a8ebb4a 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -89,8 +89,8 @@ int smc_hash_sk(struct sock *sk) write_lock_bh(&h->lock); sk_add_node(sk, head); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&h->lock); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return 0; } diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 387d28b2f8dd..85be94cabb01 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -1101,18 +1101,24 @@ static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc, smc_buf_free(lgr, true, rmb_desc); } else { rmb_desc->used = 0; + memset(rmb_desc->cpu_addr, 0, rmb_desc->len); } } static void smc_buf_unuse(struct smc_connection *conn, struct smc_link_group *lgr) { - if (conn->sndbuf_desc) + if (conn->sndbuf_desc) { conn->sndbuf_desc->used = 0; - if (conn->rmb_desc && lgr->is_smcd) + memset(conn->sndbuf_desc->cpu_addr, 0, conn->sndbuf_desc->len); + } + if (conn->rmb_desc && lgr->is_smcd) { conn->rmb_desc->used = 0; - else if (conn->rmb_desc) + memset(conn->rmb_desc->cpu_addr, 0, conn->rmb_desc->len + + sizeof(struct smcd_cdc_msg)); + } else if 
(conn->rmb_desc) { smcr_buf_unuse(conn->rmb_desc, lgr); + } } /* remove a finished connection from its link group */ @@ -2148,7 +2154,6 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) if (buf_desc) { SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize); SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb); - memset(buf_desc->cpu_addr, 0, bufsize); break; /* found reusable slot */ } diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 67e9d9fde085..e171cc6483f8 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -64,6 +64,7 @@ struct smc_pnetentry { struct { char eth_name[IFNAMSIZ + 1]; struct net_device *ndev; + netdevice_tracker dev_tracker; }; struct { char ib_name[IB_DEVICE_NAME_MAX + 1]; @@ -119,7 +120,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) smc_pnet_match(pnetelem->pnet_name, pnet_name)) { list_del(&pnetelem->list); if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev) { - dev_put(pnetelem->ndev); + dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker); pr_warn_ratelimited("smc: net device %s " "erased user defined " "pnetid %.16s\n", @@ -195,7 +196,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev) list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev && !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) { - dev_hold(ndev); + dev_hold_track(ndev, &pnetelem->dev_tracker, GFP_ATOMIC); pnetelem->ndev = ndev; rc = 0; pr_warn_ratelimited("smc: adding net device %s with " @@ -226,7 +227,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev) write_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) { - dev_put(pnetelem->ndev); + dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker); pnetelem->ndev = NULL; rc = 0; pr_warn_ratelimited("smc: removing net device %s with " @@ -368,7 +369,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN); strncpy(new_pe->eth_name, eth_name, IFNAMSIZ); new_pe->ndev = ndev; - + netdev_tracker_alloc(ndev, &new_pe->dev_tracker, GFP_KERNEL); rc = -EEXIST; new_netdev = true; write_lock(&pnettable->lock); diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 83460470e883..b62565278fac 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -28,6 +28,7 @@ typedef void switchdev_deferred_func_t(struct net_device *dev, struct switchdev_deferred_item { struct list_head list; struct net_device *dev; + netdevice_tracker dev_tracker; switchdev_deferred_func_t *func; unsigned long data[]; }; @@ -63,7 +64,7 @@ void switchdev_deferred_process(void) while ((dfitem = switchdev_deferred_dequeue())) { dfitem->func(dfitem->dev, dfitem->data); - dev_put(dfitem->dev); + dev_put_track(dfitem->dev, &dfitem->dev_tracker); kfree(dfitem); } } @@ -90,7 +91,7 @@ static int switchdev_deferred_enqueue(struct net_device *dev, dfitem->dev = dev; dfitem->func = func; memcpy(dfitem->data, data, data_len); - dev_hold(dev); + dev_hold_track(dev, &dfitem->dev_tracker, GFP_ATOMIC); spin_lock_bh(&deferred_lock); list_add_tail(&dfitem->list, &deferred); spin_unlock_bh(&deferred_lock); diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 60bc74b76adc..473a790f5894 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -787,7 +787,7 @@ int tipc_attach_loopback(struct net *net) if (!dev) return -ENODEV; - 
dev_hold(dev); + dev_hold_track(dev, &tn->loopback_pt.dev_tracker, GFP_KERNEL); tn->loopback_pt.dev = dev; tn->loopback_pt.type = htons(ETH_P_TIPC); tn->loopback_pt.func = tipc_loopback_rcv_pkt; @@ -800,7 +800,7 @@ void tipc_detach_loopback(struct net *net) struct tipc_net *tn = tipc_net(net); dev_remove_pack(&tn->loopback_pt); - dev_put(net->loopback_dev); + dev_put_track(net->loopback_dev, &tn->loopback_pt.dev_tracker); } /* Caller should hold rtnl_lock to protect the bearer */ diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index d293614d5fc6..9325479295b8 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -761,21 +761,10 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, skb_tailroom(skb), tailen); } - if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) { - nsg = 1; - trailer = skb; - } else { - /* TODO: We could avoid skb_cow_data() if skb has no frag_list - * e.g. by skb_fill_page_desc() to add another page to the skb - * with the wanted tailen... However, page skbs look not often, - * so take it easy now! - * Cloned skbs e.g. from link_xmit() seems no choice though :( - */ - nsg = skb_cow_data(skb, tailen, &trailer); - if (unlikely(nsg < 0)) { - pr_err("TX: skb_cow_data() returned %d\n", nsg); - return nsg; - } + nsg = skb_cow_data(skb, tailen, &trailer); + if (unlikely(nsg < 0)) { + pr_err("TX: skb_cow_data() returned %d\n", nsg); + return nsg; } pskb_put(skb, trailer, tailen); diff --git a/net/tipc/link.c b/net/tipc/link.c index 09ae8448f394..8d9e09f48f4c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1298,7 +1298,8 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, return false; #ifdef CONFIG_TIPC_CRYPTO case MSG_CRYPTO: - if (TIPC_SKB_CB(skb)->decrypted) { + if (sysctl_tipc_key_exchange_enabled && + TIPC_SKB_CB(skb)->decrypted) { tipc_crypto_msg_rcv(l->net, skb); return true; } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index dfe623a4e72f..3f271e29812f 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -2328,10 +2328,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_prot_info *prot = &tls_ctx->prot_info; struct tls_crypto_info *crypto_info; - struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; - struct tls12_crypto_info_aes_gcm_256 *gcm_256_info; - struct tls12_crypto_info_aes_ccm_128 *ccm_128_info; - struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info; struct tls_sw_context_tx *sw_ctx_tx = NULL; struct tls_sw_context_rx *sw_ctx_rx = NULL; struct cipher_context *cctx; @@ -2394,15 +2390,15 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) switch (crypto_info->cipher_type) { case TLS_CIPHER_AES_GCM_128: { + struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; + + gcm_128_info = (void *)crypto_info; nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE; iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; - iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv; + iv = gcm_128_info->iv; rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE; - rec_seq = - ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq; - gcm_128_info = - (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + rec_seq = gcm_128_info->rec_seq; keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE; key = gcm_128_info->key; salt = gcm_128_info->salt; @@ -2411,15 +2407,15 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) break; } case 
TLS_CIPHER_AES_GCM_256: { + struct tls12_crypto_info_aes_gcm_256 *gcm_256_info; + + gcm_256_info = (void *)crypto_info; nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE; tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE; iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE; - iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv; + iv = gcm_256_info->iv; rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE; - rec_seq = - ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq; - gcm_256_info = - (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; + rec_seq = gcm_256_info->rec_seq; keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE; key = gcm_256_info->key; salt = gcm_256_info->salt; @@ -2428,15 +2424,15 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) break; } case TLS_CIPHER_AES_CCM_128: { + struct tls12_crypto_info_aes_ccm_128 *ccm_128_info; + + ccm_128_info = (void *)crypto_info; nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE; tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE; iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE; - iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv; + iv = ccm_128_info->iv; rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE; - rec_seq = - ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq; - ccm_128_info = - (struct tls12_crypto_info_aes_ccm_128 *)crypto_info; + rec_seq = ccm_128_info->rec_seq; keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE; key = ccm_128_info->key; salt = ccm_128_info->salt; @@ -2445,6 +2441,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) break; } case TLS_CIPHER_CHACHA20_POLY1305: { + struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info; + chacha20_poly1305_info = (void *)crypto_info; nonce_size = 0; tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b0bfc78e421c..4d6e33bbd446 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -117,24 +117,64 @@ #include "scm.h" +spinlock_t unix_table_locks[2 * UNIX_HASH_SIZE]; +EXPORT_SYMBOL_GPL(unix_table_locks); struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE]; EXPORT_SYMBOL_GPL(unix_socket_table); -DEFINE_SPINLOCK(unix_table_lock); -EXPORT_SYMBOL_GPL(unix_table_lock); static atomic_long_t unix_nr_socks; +/* SMP locking strategy: + * hash table is protected with spinlock unix_table_locks + * each socket state is protected by separate spin lock. + */ -static struct hlist_head *unix_sockets_unbound(void *addr) +static unsigned int unix_unbound_hash(struct sock *sk) { - unsigned long hash = (unsigned long)addr; + unsigned long hash = (unsigned long)sk; hash ^= hash >> 16; hash ^= hash >> 8; - hash %= UNIX_HASH_SIZE; - return &unix_socket_table[UNIX_HASH_SIZE + hash]; + hash ^= sk->sk_type; + + return UNIX_HASH_SIZE + (hash & (UNIX_HASH_SIZE - 1)); +} + +static unsigned int unix_bsd_hash(struct inode *i) +{ + return i->i_ino & (UNIX_HASH_SIZE - 1); +} + +static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr, + int addr_len, int type) +{ + __wsum csum = csum_partial(sunaddr, addr_len, 0); + unsigned int hash; + + hash = (__force unsigned int)csum_fold(csum); + hash ^= hash >> 8; + hash ^= type; + + return hash & (UNIX_HASH_SIZE - 1); } -#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE) +static void unix_table_double_lock(unsigned int hash1, unsigned int hash2) +{ + /* hash1 and hash2 is never the same because + * one is between 0 and UNIX_HASH_SIZE - 1, and + * another is between UNIX_HASH_SIZE and UNIX_HASH_SIZE * 2. 
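unix_table_double_lock() in the hunk below always takes the lower-numbered bucket lock first (the spin_lock_nested() annotation only informs lockdep); locking two arbitrary locks in a globally consistent order is the standard way to avoid ABBA deadlocks. A minimal pthread sketch of the same ordering rule, with hypothetical names and a defensive equal-hash check the kernel version does not need because its two hashes come from disjoint ranges:

    #include <pthread.h>

    #define NBUCKETS 512
    /* assumed to be initialized elsewhere with pthread_mutex_init() */
    static pthread_mutex_t bucket_lock[NBUCKETS];

    /* Lock two buckets lowest-index first, so two threads locking the
     * same pair in opposite argument order can never deadlock.
     */
    static void bucket_double_lock(unsigned int h1, unsigned int h2)
    {
        if (h1 > h2) {
            unsigned int tmp = h1; h1 = h2; h2 = tmp;
        }
        pthread_mutex_lock(&bucket_lock[h1]);
        if (h2 != h1)
            pthread_mutex_lock(&bucket_lock[h2]);
    }

    static void bucket_double_unlock(unsigned int h1, unsigned int h2)
    {
        if (h1 > h2) {
            unsigned int tmp = h1; h1 = h2; h2 = tmp;
        }
        if (h2 != h1)
            pthread_mutex_unlock(&bucket_lock[h2]);
        pthread_mutex_unlock(&bucket_lock[h1]);
    }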
+ */ + if (hash1 > hash2) + swap(hash1, hash2); + + spin_lock(&unix_table_locks[hash1]); + spin_lock_nested(&unix_table_locks[hash2], SINGLE_DEPTH_NESTING); +} + +static void unix_table_double_unlock(unsigned int hash1, unsigned int hash2) +{ + spin_unlock(&unix_table_locks[hash1]); + spin_unlock(&unix_table_locks[hash2]); +} #ifdef CONFIG_SECURITY_NETWORK static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) @@ -164,20 +204,6 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) } #endif /* CONFIG_SECURITY_NETWORK */ -/* - * SMP locking strategy: - * hash table is protected with spinlock unix_table_lock - * each socket state is protected by separate spin lock. - */ - -static inline unsigned int unix_hash_fold(__wsum n) -{ - unsigned int hash = (__force unsigned int)csum_fold(n); - - hash ^= hash>>8; - return hash&(UNIX_HASH_SIZE-1); -} - #define unix_peer(sk) (unix_sk(sk)->peer) static inline int unix_our_peer(struct sock *sk, struct sock *osk) @@ -214,6 +240,22 @@ struct sock *unix_peer_get(struct sock *s) } EXPORT_SYMBOL_GPL(unix_peer_get); +static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr, + int addr_len) +{ + struct unix_address *addr; + + addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL); + if (!addr) + return NULL; + + refcount_set(&addr->refcnt, 1); + addr->len = addr_len; + memcpy(addr->name, sunaddr, addr_len); + + return addr; +} + static inline void unix_release_addr(struct unix_address *addr) { if (refcount_dec_and_test(&addr->refcnt)) @@ -227,29 +269,29 @@ static inline void unix_release_addr(struct unix_address *addr) * - if started by zero, it is abstract name. */ -static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp) +static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len) { - *hashp = 0; - - if (len <= sizeof(short) || len > sizeof(*sunaddr)) + if (addr_len <= offsetof(struct sockaddr_un, sun_path) || + addr_len > sizeof(*sunaddr)) return -EINVAL; - if (!sunaddr || sunaddr->sun_family != AF_UNIX) + + if (sunaddr->sun_family != AF_UNIX) return -EINVAL; - if (sunaddr->sun_path[0]) { - /* - * This may look like an off by one error but it is a bit more - * subtle. 108 is the longest valid AF_UNIX path for a binding. - * sun_path[108] doesn't as such exist. However in kernel space - * we are guaranteed that it is a valid memory location in our - * kernel address buffer. - */ - ((char *)sunaddr)[len] = 0; - len = strlen(sunaddr->sun_path)+1+sizeof(short); - return len; - } - *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0)); - return len; + return 0; +} + +static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len) +{ + /* This may look like an off by one error but it is a bit more + * subtle. 108 is the longest valid AF_UNIX path for a binding. + * sun_path[108] doesn't as such exist. However in kernel space + * we are guaranteed that it is a valid memory location in our + * kernel address buffer because syscall functions always pass + * a pointer of struct sockaddr_storage which has a bigger buffer + * than 108. 
+ */ + ((char *)sunaddr)[addr_len] = 0; } static void __unix_remove_socket(struct sock *sk) @@ -257,32 +299,34 @@ static void __unix_remove_socket(struct sock *sk) sk_del_node_init(sk); } -static void __unix_insert_socket(struct hlist_head *list, struct sock *sk) +static void __unix_insert_socket(struct sock *sk) { WARN_ON(!sk_unhashed(sk)); - sk_add_node(sk, list); + sk_add_node(sk, &unix_socket_table[sk->sk_hash]); } -static void __unix_set_addr(struct sock *sk, struct unix_address *addr, - unsigned hash) +static void __unix_set_addr_hash(struct sock *sk, struct unix_address *addr, + unsigned int hash) { __unix_remove_socket(sk); smp_store_release(&unix_sk(sk)->addr, addr); - __unix_insert_socket(&unix_socket_table[hash], sk); + + sk->sk_hash = hash; + __unix_insert_socket(sk); } -static inline void unix_remove_socket(struct sock *sk) +static void unix_remove_socket(struct sock *sk) { - spin_lock(&unix_table_lock); + spin_lock(&unix_table_locks[sk->sk_hash]); __unix_remove_socket(sk); - spin_unlock(&unix_table_lock); + spin_unlock(&unix_table_locks[sk->sk_hash]); } -static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk) +static void unix_insert_unbound_socket(struct sock *sk) { - spin_lock(&unix_table_lock); - __unix_insert_socket(list, sk); - spin_unlock(&unix_table_lock); + spin_lock(&unix_table_locks[sk->sk_hash]); + __unix_insert_socket(sk); + spin_unlock(&unix_table_locks[sk->sk_hash]); } static struct sock *__unix_find_socket_byname(struct net *net, @@ -310,32 +354,31 @@ static inline struct sock *unix_find_socket_byname(struct net *net, { struct sock *s; - spin_lock(&unix_table_lock); + spin_lock(&unix_table_locks[hash]); s = __unix_find_socket_byname(net, sunname, len, hash); if (s) sock_hold(s); - spin_unlock(&unix_table_lock); + spin_unlock(&unix_table_locks[hash]); return s; } static struct sock *unix_find_socket_byinode(struct inode *i) { + unsigned int hash = unix_bsd_hash(i); struct sock *s; - spin_lock(&unix_table_lock); - sk_for_each(s, - &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { + spin_lock(&unix_table_locks[hash]); + sk_for_each(s, &unix_socket_table[hash]) { struct dentry *dentry = unix_sk(s)->path.dentry; if (dentry && d_backing_inode(dentry) == i) { sock_hold(s); - goto found; + spin_unlock(&unix_table_locks[hash]); + return s; } } - s = NULL; -found: - spin_unlock(&unix_table_lock); - return s; + spin_unlock(&unix_table_locks[hash]); + return NULL; } /* Support code for asymmetrically connected dgram sockets @@ -522,9 +565,7 @@ static void unix_sock_destructor(struct sock *sk) unix_release_addr(u->addr); atomic_long_dec(&unix_nr_socks); - local_bh_disable(); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - local_bh_enable(); #ifdef UNIX_REFCNT_DEBUG pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk, atomic_long_read(&unix_nr_socks)); @@ -872,6 +913,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, sock_init_data(sock, sk); + sk->sk_hash = unix_unbound_hash(sk); sk->sk_allocation = GFP_KERNEL_ACCOUNT; sk->sk_write_space = unix_write_space; sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; @@ -887,11 +929,9 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, init_waitqueue_head(&u->peer_wait); init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); memset(&u->scm_stat, 0, sizeof(struct scm_stat)); - unix_insert_socket(unix_sockets_unbound(sk), sk); + unix_insert_unbound_socket(sk); - local_bh_disable(); 
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); - local_bh_enable(); return sk; @@ -952,15 +992,90 @@ static int unix_release(struct socket *sock) return 0; } -static int unix_autobind(struct socket *sock) +static struct sock *unix_find_bsd(struct net *net, struct sockaddr_un *sunaddr, + int addr_len, int type) { - struct sock *sk = sock->sk; - struct net *net = sock_net(sk); + struct inode *inode; + struct path path; + struct sock *sk; + int err; + + unix_mkname_bsd(sunaddr, addr_len); + err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path); + if (err) + goto fail; + + err = path_permission(&path, MAY_WRITE); + if (err) + goto path_put; + + err = -ECONNREFUSED; + inode = d_backing_inode(path.dentry); + if (!S_ISSOCK(inode->i_mode)) + goto path_put; + + sk = unix_find_socket_byinode(inode); + if (!sk) + goto path_put; + + err = -EPROTOTYPE; + if (sk->sk_type == type) + touch_atime(&path); + else + goto sock_put; + + path_put(&path); + + return sk; + +sock_put: + sock_put(sk); +path_put: + path_put(&path); +fail: + return ERR_PTR(err); +} + +static struct sock *unix_find_abstract(struct net *net, + struct sockaddr_un *sunaddr, + int addr_len, int type) +{ + unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type); + struct dentry *dentry; + struct sock *sk; + + sk = unix_find_socket_byname(net, sunaddr, addr_len, hash); + if (!sk) + return ERR_PTR(-ECONNREFUSED); + + dentry = unix_sk(sk)->path.dentry; + if (dentry) + touch_atime(&unix_sk(sk)->path); + + return sk; +} + +static struct sock *unix_find_other(struct net *net, + struct sockaddr_un *sunaddr, + int addr_len, int type) +{ + struct sock *sk; + + if (sunaddr->sun_path[0]) + sk = unix_find_bsd(net, sunaddr, addr_len, type); + else + sk = unix_find_abstract(net, sunaddr, addr_len, type); + + return sk; +} + +static int unix_autobind(struct sock *sk) +{ + unsigned int new_hash, old_hash = sk->sk_hash; struct unix_sock *u = unix_sk(sk); - static u32 ordernum = 1; struct unix_address *addr; + u32 lastnum, ordernum; int err; - unsigned int retries = 0; err = mutex_lock_interruptible(&u->bindlock); if (err) @@ -970,141 +1085,103 @@ static int unix_autobind(struct socket *sock) goto out; err = -ENOMEM; - addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL); + addr = kzalloc(sizeof(*addr) + + offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL); if (!addr) goto out; + addr->len = offsetof(struct sockaddr_un, sun_path) + 6; addr->name->sun_family = AF_UNIX; refcount_set(&addr->refcnt, 1); + ordernum = prandom_u32(); + lastnum = ordernum & 0xFFFFF; retry: - addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short); - addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0)); - addr->hash ^= sk->sk_type; + ordernum = (ordernum + 1) & 0xFFFFF; + sprintf(addr->name->sun_path + 1, "%05x", ordernum); - spin_lock(&unix_table_lock); - ordernum = (ordernum+1)&0xFFFFF; + new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type); + unix_table_double_lock(old_hash, new_hash); - if (__unix_find_socket_byname(net, addr->name, addr->len, addr->hash)) { - spin_unlock(&unix_table_lock); - /* - * __unix_find_socket_byname() may take long time if many names + if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len, + new_hash)) { + unix_table_double_unlock(old_hash, new_hash); + + /* __unix_find_socket_byname() may take long time if many names * are already in use. */ cond_resched(); - /* Give up if all names seems to be in use. 
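The reworked unix_autobind() above starts from a random 20-bit ordernum and walks the name space at most once, detecting exhaustion when the counter wraps back to its starting value rather than counting retries. A standalone sketch of that loop under the same assumptions; name_in_use() is a made-up stand-in for the __unix_find_socket_byname() lookup:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the hash-table name lookup. */
    static bool name_in_use(const char *name)
    {
        (void)name;
        return false;
    }

    /* Try every 5-hex-digit autobind name exactly once, starting at a
     * random offset; returns -1 once the whole 2^20 space is exhausted.
     */
    static int pick_autobind_name(char out[6])
    {
        unsigned int ordernum = (unsigned int)rand() & 0xFFFFF;
        unsigned int lastnum = ordernum;

        do {
            ordernum = (ordernum + 1) & 0xFFFFF;
            snprintf(out, 6, "%05x", ordernum);
            if (!name_in_use(out))
                return 0;
        } while (ordernum != lastnum);

        return -1;   /* every candidate name already taken */
    }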
*/ - if (retries++ == 0xFFFFF) { + + if (ordernum == lastnum) { + /* Give up if all names seems to be in use. */ err = -ENOSPC; - kfree(addr); + unix_release_addr(addr); goto out; } + goto retry; } - __unix_set_addr(sk, addr, addr->hash); - spin_unlock(&unix_table_lock); + __unix_set_addr_hash(sk, addr, new_hash); + unix_table_double_unlock(old_hash, new_hash); err = 0; out: mutex_unlock(&u->bindlock); return err; } -static struct sock *unix_find_other(struct net *net, - struct sockaddr_un *sunname, int len, - int type, unsigned int hash, int *error) -{ - struct sock *u; - struct path path; - int err = 0; - - if (sunname->sun_path[0]) { - struct inode *inode; - err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path); - if (err) - goto fail; - inode = d_backing_inode(path.dentry); - err = path_permission(&path, MAY_WRITE); - if (err) - goto put_fail; - - err = -ECONNREFUSED; - if (!S_ISSOCK(inode->i_mode)) - goto put_fail; - u = unix_find_socket_byinode(inode); - if (!u) - goto put_fail; - - if (u->sk_type == type) - touch_atime(&path); - - path_put(&path); - - err = -EPROTOTYPE; - if (u->sk_type != type) { - sock_put(u); - goto fail; - } - } else { - err = -ECONNREFUSED; - u = unix_find_socket_byname(net, sunname, len, type ^ hash); - if (u) { - struct dentry *dentry; - dentry = unix_sk(u)->path.dentry; - if (dentry) - touch_atime(&unix_sk(u)->path); - } else - goto fail; - } - return u; - -put_fail: - path_put(&path); -fail: - *error = err; - return NULL; -} - -static int unix_bind_bsd(struct sock *sk, struct unix_address *addr) +static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr, + int addr_len) { - struct unix_sock *u = unix_sk(sk); umode_t mode = S_IFSOCK | (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask()); + unsigned int new_hash, old_hash = sk->sk_hash; + struct unix_sock *u = unix_sk(sk); struct user_namespace *ns; // barf... - struct path parent; + struct unix_address *addr; struct dentry *dentry; - unsigned int hash; + struct path parent; int err; + unix_mkname_bsd(sunaddr, addr_len); + addr_len = strlen(sunaddr->sun_path) + + offsetof(struct sockaddr_un, sun_path) + 1; + + addr = unix_create_addr(sunaddr, addr_len); + if (!addr) + return -ENOMEM; + /* * Get the parent directory, calculate the hash for last * component. */ dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - ns = mnt_user_ns(parent.mnt); + if (IS_ERR(dentry)) { + err = PTR_ERR(dentry); + goto out; + } /* * All right, let's create it. */ + ns = mnt_user_ns(parent.mnt); err = security_path_mknod(&parent, dentry, mode, 0); if (!err) err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0); if (err) - goto out; + goto out_path; err = mutex_lock_interruptible(&u->bindlock); if (err) goto out_unlink; if (u->addr) goto out_unlock; - addr->hash = UNIX_HASH_SIZE; - hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); - spin_lock(&unix_table_lock); + new_hash = unix_bsd_hash(d_backing_inode(dentry)); + unix_table_double_lock(old_hash, new_hash); u->path.mnt = mntget(parent.mnt); u->path.dentry = dget(dentry); - __unix_set_addr(sk, addr, hash); - spin_unlock(&unix_table_lock); + __unix_set_addr_hash(sk, addr, new_hash); + unix_table_double_unlock(old_hash, new_hash); mutex_unlock(&u->bindlock); done_path_create(&parent, dentry); return 0; @@ -1115,74 +1192,76 @@ out_unlock: out_unlink: /* failed after successful mknod? unlink what we'd created... 
*/ vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL); -out: +out_path: done_path_create(&parent, dentry); - return err; +out: + unix_release_addr(addr); + return err == -EEXIST ? -EADDRINUSE : err; } -static int unix_bind_abstract(struct sock *sk, struct unix_address *addr) +static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr, + int addr_len) { + unsigned int new_hash, old_hash = sk->sk_hash; struct unix_sock *u = unix_sk(sk); + struct unix_address *addr; int err; + addr = unix_create_addr(sunaddr, addr_len); + if (!addr) + return -ENOMEM; + err = mutex_lock_interruptible(&u->bindlock); if (err) - return err; + goto out; if (u->addr) { - mutex_unlock(&u->bindlock); - return -EINVAL; + err = -EINVAL; + goto out_mutex; } - spin_lock(&unix_table_lock); + new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type); + unix_table_double_lock(old_hash, new_hash); + if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len, - addr->hash)) { - spin_unlock(&unix_table_lock); - mutex_unlock(&u->bindlock); - return -EADDRINUSE; - } - __unix_set_addr(sk, addr, addr->hash); - spin_unlock(&unix_table_lock); + new_hash)) + goto out_spin; + + __unix_set_addr_hash(sk, addr, new_hash); + unix_table_double_unlock(old_hash, new_hash); mutex_unlock(&u->bindlock); return 0; + +out_spin: + unix_table_double_unlock(old_hash, new_hash); + err = -EADDRINUSE; +out_mutex: + mutex_unlock(&u->bindlock); +out: + unix_release_addr(addr); + return err; } static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { - struct sock *sk = sock->sk; struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; - char *sun_path = sunaddr->sun_path; + struct sock *sk = sock->sk; int err; - unsigned int hash; - struct unix_address *addr; - - if (addr_len < offsetofend(struct sockaddr_un, sun_family) || - sunaddr->sun_family != AF_UNIX) - return -EINVAL; - if (addr_len == sizeof(short)) - return unix_autobind(sock); + if (addr_len == offsetof(struct sockaddr_un, sun_path) && + sunaddr->sun_family == AF_UNIX) + return unix_autobind(sk); - err = unix_mkname(sunaddr, addr_len, &hash); - if (err < 0) + err = unix_validate_addr(sunaddr, addr_len); + if (err) return err; - addr_len = err; - addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL); - if (!addr) - return -ENOMEM; - memcpy(addr->name, sunaddr, addr_len); - addr->len = addr_len; - addr->hash = hash ^ sk->sk_type; - refcount_set(&addr->refcnt, 1); - - if (sun_path[0]) - err = unix_bind_bsd(sk, addr); + if (sunaddr->sun_path[0]) + err = unix_bind_bsd(sk, sunaddr, addr_len); else - err = unix_bind_abstract(sk, addr); - if (err) - unix_release_addr(addr); - return err == -EEXIST ? 
-EADDRINUSE : err; + err = unix_bind_abstract(sk, sunaddr, addr_len); + + return err; } static void unix_state_double_lock(struct sock *sk1, struct sock *sk2) @@ -1217,7 +1296,6 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, struct net *net = sock_net(sk); struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; struct sock *other; - unsigned int hash; int err; err = -EINVAL; @@ -1225,19 +1303,23 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, goto out; if (addr->sa_family != AF_UNSPEC) { - err = unix_mkname(sunaddr, alen, &hash); - if (err < 0) + err = unix_validate_addr(sunaddr, alen); + if (err) goto out; - alen = err; if (test_bit(SOCK_PASSCRED, &sock->flags) && - !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0) - goto out; + !unix_sk(sk)->addr) { + err = unix_autobind(sk); + if (err) + goto out; + } restart: - other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err); - if (!other) + other = unix_find_other(net, sunaddr, alen, sock->type); + if (IS_ERR(other)) { + err = PTR_ERR(other); goto out; + } unix_state_double_lock(sk, other); @@ -1327,19 +1409,19 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, struct sock *newsk = NULL; struct sock *other = NULL; struct sk_buff *skb = NULL; - unsigned int hash; int st; int err; long timeo; - err = unix_mkname(sunaddr, addr_len, &hash); - if (err < 0) + err = unix_validate_addr(sunaddr, addr_len); + if (err) goto out; - addr_len = err; - if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && - (err = unix_autobind(sock)) != 0) - goto out; + if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) { + err = unix_autobind(sk); + if (err) + goto out; + } timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); @@ -1365,9 +1447,12 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, restart: /* Find listening sock. */ - other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err); - if (!other) + other = unix_find_other(net, sunaddr, addr_len, sk->sk_type); + if (IS_ERR(other)) { + err = PTR_ERR(other); + other = NULL; goto out; + } /* Latch state of peer */ unix_state_lock(other); @@ -1455,9 +1540,9 @@ restart: * * The contents of *(otheru->addr) and otheru->path * are seen fully set up here, since we have found - * otheru in hash under unix_table_lock. Insertion + * otheru in hash under unix_table_locks. Insertion * into the hash chain we'd found it in had been done - * in an earlier critical area protected by unix_table_lock, + * in an earlier critical area protected by unix_table_locks, * the same one where we'd set *(otheru->addr) contents, * as well as otheru->path and otheru->addr itself. 
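Elsewhere in this patch the address is published with smp_store_release(&unix_sk(sk)->addr, addr) and read by lockless paths (such as the diag code further down) with smp_load_acquire(), so a reader that sees the pointer also sees the fully initialized structure behind it. A minimal C11 analogue of that publish/consume pairing, with hypothetical names:

    #include <stdatomic.h>
    #include <stddef.h>

    struct unix_addr {              /* fully built before being published */
        int len;
        char name[108];
    };

    static _Atomic(struct unix_addr *) published_addr;

    /* Writer: initialize every field first, then publish with release
     * semantics so the initialization is visible to acquiring readers.
     */
    static void publish(struct unix_addr *a)
    {
        atomic_store_explicit(&published_addr, a, memory_order_release);
    }

    /* Lockless reader: the acquire load pairs with the release store. */
    static const struct unix_addr *peek(void)
    {
        return atomic_load_explicit(&published_addr, memory_order_acquire);
    }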
* @@ -1604,7 +1689,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) if (!addr) { sunaddr->sun_family = AF_UNIX; sunaddr->sun_path[0] = 0; - err = sizeof(short); + err = offsetof(struct sockaddr_un, sun_path); } else { err = addr->len; memcpy(sunaddr, addr->name, addr->len); @@ -1760,9 +1845,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, struct unix_sock *u = unix_sk(sk); DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name); struct sock *other = NULL; - int namelen = 0; /* fake GCC */ int err; - unsigned int hash; struct sk_buff *skb; long timeo; struct scm_cookie scm; @@ -1779,10 +1862,9 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, goto out; if (msg->msg_namelen) { - err = unix_mkname(sunaddr, msg->msg_namelen, &hash); - if (err < 0) + err = unix_validate_addr(sunaddr, msg->msg_namelen); + if (err) goto out; - namelen = err; } else { sunaddr = NULL; err = -ENOTCONN; @@ -1791,9 +1873,11 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, goto out; } - if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr - && (err = unix_autobind(sock)) != 0) - goto out; + if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) { + err = unix_autobind(sk); + if (err) + goto out; + } err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) @@ -1833,10 +1917,13 @@ restart: if (sunaddr == NULL) goto out_free; - other = unix_find_other(net, sunaddr, namelen, sk->sk_type, - hash, &err); - if (other == NULL) + other = unix_find_other(net, sunaddr, msg->msg_namelen, + sk->sk_type); + if (IS_ERR(other)) { + err = PTR_ERR(other); + other = NULL; goto out_free; + } } if (sk_filter(other, skb) < 0) { @@ -3132,7 +3219,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1) #define get_bucket(x) ((x) >> BUCKET_SPACE) -#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1)) +#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1)) #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos) @@ -3156,7 +3243,7 @@ static struct sock *unix_next_socket(struct seq_file *seq, struct sock *sk, loff_t *pos) { - unsigned long bucket; + unsigned long bucket = get_bucket(*pos); while (sk > (struct sock *)SEQ_START_TOKEN) { sk = sk_next(sk); @@ -3167,12 +3254,13 @@ static struct sock *unix_next_socket(struct seq_file *seq, } do { + spin_lock(&unix_table_locks[bucket]); sk = unix_from_bucket(seq, pos); if (sk) return sk; next_bucket: - bucket = get_bucket(*pos) + 1; + spin_unlock(&unix_table_locks[bucket++]); *pos = set_bucket_offset(bucket, 1); } while (bucket < ARRAY_SIZE(unix_socket_table)); @@ -3180,10 +3268,7 @@ next_bucket: } static void *unix_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(unix_table_lock) { - spin_lock(&unix_table_lock); - if (!*pos) return SEQ_START_TOKEN; @@ -3200,9 +3285,11 @@ static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) } static void unix_seq_stop(struct seq_file *seq, void *v) - __releases(unix_table_lock) { - spin_unlock(&unix_table_lock); + struct sock *sk = v; + + if (sk) + spin_unlock(&unix_table_locks[sk->sk_hash]); } static int unix_seq_show(struct seq_file *seq, void *v) @@ -3227,15 +3314,16 @@ static int unix_seq_show(struct seq_file *seq, void *v) (s->sk_state == TCP_ESTABLISHED ? 
SS_CONNECTING : SS_DISCONNECTING), sock_i_ino(s)); - if (u->addr) { // under unix_table_lock here + if (u->addr) { // under unix_table_locks here int i, len; seq_putc(seq, ' '); i = 0; - len = u->addr->len - sizeof(short); - if (!UNIX_ABSTRACT(s)) + len = u->addr->len - + offsetof(struct sockaddr_un, sun_path); + if (u->addr->name->sun_path[0]) { len--; - else { + } else { seq_putc(seq, '@'); i++; } @@ -3385,10 +3473,13 @@ static void __init bpf_iter_register(void) static int __init af_unix_init(void) { - int rc = -1; + int i, rc = -1; BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb)); + for (i = 0; i < 2 * UNIX_HASH_SIZE; i++) + spin_lock_init(&unix_table_locks[i]); + rc = proto_register(&unix_dgram_proto, 1); if (rc != 0) { pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); diff --git a/net/unix/diag.c b/net/unix/diag.c index 7e7d7f45685a..bb0b5ea1655f 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -13,13 +13,14 @@ static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) { - /* might or might not have unix_table_lock */ + /* might or might not have unix_table_locks */ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); if (!addr) return 0; - return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short), + return nla_put(nlskb, UNIX_DIAG_NAME, + addr->len - offsetof(struct sockaddr_un, sun_path), addr->name->sun_path); } @@ -203,13 +204,13 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) s_slot = cb->args[0]; num = s_num = cb->args[1]; - spin_lock(&unix_table_lock); for (slot = s_slot; slot < ARRAY_SIZE(unix_socket_table); s_num = 0, slot++) { struct sock *sk; num = 0; + spin_lock(&unix_table_locks[slot]); sk_for_each(sk, &unix_socket_table[slot]) { if (!net_eq(sock_net(sk), net)) continue; @@ -220,14 +221,16 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) if (sk_diag_dump(sk, skb, req, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - NLM_F_MULTI) < 0) + NLM_F_MULTI) < 0) { + spin_unlock(&unix_table_locks[slot]); goto done; + } next: num++; } + spin_unlock(&unix_table_locks[slot]); } done: - spin_unlock(&unix_table_lock); cb->args[0] = slot; cb->args[1] = num; @@ -236,21 +239,19 @@ done: static struct sock *unix_lookup_by_ino(unsigned int ino) { - int i; struct sock *sk; + int i; - spin_lock(&unix_table_lock); for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) { + spin_lock(&unix_table_locks[i]); sk_for_each(sk, &unix_socket_table[i]) if (ino == sock_i_ino(sk)) { sock_hold(sk); - spin_unlock(&unix_table_lock); - + spin_unlock(&unix_table_locks[i]); return sk; } + spin_unlock(&unix_table_locks[i]); } - - spin_unlock(&unix_table_lock); return NULL; } diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c index c09bea89151b..01d44e2598e2 100644 --- a/net/unix/sysctl_net_unix.c +++ b/net/unix/sysctl_net_unix.c @@ -30,10 +30,6 @@ int __net_init unix_sysctl_register(struct net *net) if (table == NULL) goto err_alloc; - /* Don't export sysctls to unprivileged users */ - if (net->user_ns != &init_user_ns) - table[0].procname = NULL; - table[0].data = &net->unx.sysctl_max_dgram_qlen; net->unx.ctl = register_net_sysctl(net, "net/unix", table); if (net->unx.ctl == NULL) diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index 19189cf30a72..e111e13b6660 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c @@ -225,14 +225,20 @@ static size_t hvs_channel_writable_bytes(struct vmbus_channel 
*chan) return round_down(ret, 8); } +static int __hvs_send_data(struct vmbus_channel *chan, + struct vmpipe_proto_header *hdr, + size_t to_write) +{ + hdr->pkt_type = 1; + hdr->data_size = to_write; + return vmbus_sendpacket(chan, hdr, sizeof(*hdr) + to_write, + 0, VM_PKT_DATA_INBAND, 0); +} + static int hvs_send_data(struct vmbus_channel *chan, struct hvs_send_buf *send_buf, size_t to_write) { - send_buf->hdr.pkt_type = 1; - send_buf->hdr.data_size = to_write; - return vmbus_sendpacket(chan, &send_buf->hdr, - sizeof(send_buf->hdr) + to_write, - 0, VM_PKT_DATA_INBAND, 0); + return __hvs_send_data(chan, &send_buf->hdr, to_write); } static void hvs_channel_cb(void *ctx) @@ -468,7 +474,7 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode) return; /* It can't fail: see hvs_channel_writable_bytes(). */ - (void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0); + (void)__hvs_send_data(hvs->chan, &hdr, 0); hvs->fin_sent = true; } diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 869c43d4414c..eb822052d344 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -245,19 +245,7 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) oper_freq - MHZ_TO_KHZ(oper_width) / 2) return false; break; - case NL80211_CHAN_WIDTH_40: - if (chandef->center_freq1 != control_freq + 10 && - chandef->center_freq1 != control_freq - 10) - return false; - if (chandef->center_freq2) - return false; - break; case NL80211_CHAN_WIDTH_80P80: - if (chandef->center_freq1 != control_freq + 30 && - chandef->center_freq1 != control_freq + 10 && - chandef->center_freq1 != control_freq - 10 && - chandef->center_freq1 != control_freq - 30) - return false; if (!chandef->center_freq2) return false; /* adjacent is not allowed -- that's a 160 MHz channel */ @@ -265,28 +253,42 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) chandef->center_freq2 - chandef->center_freq1 == 80) return false; break; - case NL80211_CHAN_WIDTH_80: - if (chandef->center_freq1 != control_freq + 30 && - chandef->center_freq1 != control_freq + 10 && - chandef->center_freq1 != control_freq - 10 && - chandef->center_freq1 != control_freq - 30) - return false; + default: if (chandef->center_freq2) return false; break; - case NL80211_CHAN_WIDTH_160: - if (chandef->center_freq1 != control_freq + 70 && - chandef->center_freq1 != control_freq + 50 && - chandef->center_freq1 != control_freq + 30 && - chandef->center_freq1 != control_freq + 10 && - chandef->center_freq1 != control_freq - 10 && - chandef->center_freq1 != control_freq - 30 && - chandef->center_freq1 != control_freq - 50 && - chandef->center_freq1 != control_freq - 70) - return false; - if (chandef->center_freq2) - return false; + } + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_1: + case NL80211_CHAN_WIDTH_2: + case NL80211_CHAN_WIDTH_4: + case NL80211_CHAN_WIDTH_8: + case NL80211_CHAN_WIDTH_16: + /* all checked above */ break; + case NL80211_CHAN_WIDTH_160: + if (chandef->center_freq1 == control_freq + 70 || + chandef->center_freq1 == control_freq + 50 || + chandef->center_freq1 == control_freq - 50 || + chandef->center_freq1 == control_freq - 70) + break; + fallthrough; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_80: + if (chandef->center_freq1 == control_freq + 30 || + chandef->center_freq1 == control_freq - 30) + break; + fallthrough; + case NL80211_CHAN_WIDTH_40: + if 
(chandef->center_freq1 == control_freq + 10 || + chandef->center_freq1 == control_freq - 10) + break; + fallthrough; default: return false; } @@ -712,6 +714,19 @@ static bool cfg80211_is_wiphy_oper_chan(struct wiphy *wiphy, return false; } +static bool +cfg80211_offchan_chain_is_active(struct cfg80211_registered_device *rdev, + struct ieee80211_channel *channel) +{ + if (!rdev->background_radar_wdev) + return false; + + if (!cfg80211_chandef_valid(&rdev->background_radar_chandef)) + return false; + + return cfg80211_is_sub_chan(&rdev->background_radar_chandef, channel); +} + bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy, struct ieee80211_channel *chan) { @@ -728,6 +743,9 @@ bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy, if (cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan)) return true; + + if (cfg80211_offchan_chain_is_active(rdev, chan)) + return true; } return false; diff --git a/net/wireless/core.c b/net/wireless/core.c index eb297e1015e0..3a54c8e6b6c6 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -545,6 +545,10 @@ use_default_name: INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work); INIT_WORK(&rdev->conn_work, cfg80211_conn_work); INIT_WORK(&rdev->event_work, cfg80211_event_work); + INIT_WORK(&rdev->background_cac_abort_wk, + cfg80211_background_cac_abort_wk); + INIT_DELAYED_WORK(&rdev->background_cac_done_wk, + cfg80211_background_cac_done_wk); init_waitqueue_head(&rdev->dev_wait); @@ -733,6 +737,7 @@ int wiphy_register(struct wiphy *wiphy) if (wiphy->interface_modes & ~(BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_P2P_DEVICE) | @@ -1054,11 +1059,13 @@ void wiphy_unregister(struct wiphy *wiphy) cancel_work_sync(&rdev->conn_work); flush_work(&rdev->event_work); cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); + cancel_delayed_work_sync(&rdev->background_cac_done_wk); flush_work(&rdev->destroy_work); flush_work(&rdev->sched_scan_stop_wk); flush_work(&rdev->propagate_radar_detect_wk); flush_work(&rdev->propagate_cac_done_wk); flush_work(&rdev->mgmt_registrations_update_wk); + flush_work(&rdev->background_cac_abort_wk); #ifdef CONFIG_PM if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) @@ -1207,6 +1214,8 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev, cfg80211_pmsr_wdev_down(wdev); + cfg80211_stop_background_radar_detection(wdev); + switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: __cfg80211_leave_ibss(rdev, dev, true); diff --git a/net/wireless/core.h b/net/wireless/core.h index 1720abf36f92..3a7dbd63d8c6 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -84,6 +84,11 @@ struct cfg80211_registered_device { struct delayed_work dfs_update_channels_wk; + struct wireless_dev *background_radar_wdev; + struct cfg80211_chan_def background_radar_chandef; + struct delayed_work background_cac_done_wk; + struct work_struct background_cac_abort_wk; + /* netlink port which started critical protocol (0 means not started) */ u32 crit_proto_nlportid; @@ -491,6 +496,17 @@ cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev); +int +cfg80211_start_background_radar_detection(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, + struct cfg80211_chan_def *chandef); + +void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev); + +void 
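The cfg80211_chandef_valid() rewrite above collapses the per-width center_freq1 checks into one switch that falls through from wider to narrower widths: the offsets that only a 160 MHz placement allows are tested first, then the 80 MHz offsets, then the 40 MHz ones. A standalone sketch of that cascading check, with plain integers instead of the cfg80211 types:

#include <linux/types.h>

enum demo_width { DEMO_W20, DEMO_W40, DEMO_W80, DEMO_W160 };

/* Accept center_freq1 only at the offsets the given width allows; each
 * wider width adds its own offsets and falls through to the narrower
 * widths' offsets. */
static bool demo_center_freq1_ok(enum demo_width width, int control_freq,
                                 int center_freq1)
{
        int off = center_freq1 - control_freq;

        switch (width) {
        case DEMO_W20:
                return off == 0;
        case DEMO_W160:
                if (off == 70 || off == 50 || off == -50 || off == -70)
                        return true;
                /* fall through */
        case DEMO_W80:
                if (off == 30 || off == -30)
                        return true;
                /* fall through */
        case DEMO_W40:
                return off == 10 || off == -10;
        }
        return false;
}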
cfg80211_background_cac_done_wk(struct work_struct *work); + +void cfg80211_background_cac_abort_wk(struct work_struct *work); + bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy, struct ieee80211_channel *chan); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 783acd2c4211..c8155a483ec2 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -905,13 +905,13 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work) } -void cfg80211_radar_event(struct wiphy *wiphy, - struct cfg80211_chan_def *chandef, - gfp_t gfp) +void __cfg80211_radar_event(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + bool offchan, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); - trace_cfg80211_radar_event(wiphy, chandef); + trace_cfg80211_radar_event(wiphy, chandef, offchan); /* only set the chandef supplied channel to unavailable, in * case the radar is detected on only one of multiple channels @@ -919,6 +919,9 @@ void cfg80211_radar_event(struct wiphy *wiphy, */ cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE); + if (offchan) + queue_work(cfg80211_wq, &rdev->background_cac_abort_wk); + cfg80211_sched_dfs_chan_update(rdev); nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp); @@ -926,7 +929,7 @@ void cfg80211_radar_event(struct wiphy *wiphy, memcpy(&rdev->radar_chandef, chandef, sizeof(struct cfg80211_chan_def)); queue_work(cfg80211_wq, &rdev->propagate_radar_detect_wk); } -EXPORT_SYMBOL(cfg80211_radar_event); +EXPORT_SYMBOL(__cfg80211_radar_event); void cfg80211_cac_event(struct net_device *netdev, const struct cfg80211_chan_def *chandef, @@ -970,3 +973,143 @@ void cfg80211_cac_event(struct net_device *netdev, nl80211_radar_notify(rdev, chandef, event, netdev, gfp); } EXPORT_SYMBOL(cfg80211_cac_event); + +static void +__cfg80211_background_cac_event(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, + const struct cfg80211_chan_def *chandef, + enum nl80211_radar_event event) +{ + struct wiphy *wiphy = &rdev->wiphy; + struct net_device *netdev; + + lockdep_assert_wiphy(&rdev->wiphy); + + if (!cfg80211_chandef_valid(chandef)) + return; + + if (!rdev->background_radar_wdev) + return; + + switch (event) { + case NL80211_RADAR_CAC_FINISHED: + cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE); + memcpy(&rdev->cac_done_chandef, chandef, sizeof(*chandef)); + queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk); + cfg80211_sched_dfs_chan_update(rdev); + wdev = rdev->background_radar_wdev; + break; + case NL80211_RADAR_CAC_ABORTED: + if (!cancel_delayed_work(&rdev->background_cac_done_wk)) + return; + wdev = rdev->background_radar_wdev; + break; + case NL80211_RADAR_CAC_STARTED: + break; + default: + return; + } + + netdev = wdev ? 
wdev->netdev : NULL; + nl80211_radar_notify(rdev, chandef, event, netdev, GFP_KERNEL); +} + +static void +cfg80211_background_cac_event(struct cfg80211_registered_device *rdev, + const struct cfg80211_chan_def *chandef, + enum nl80211_radar_event event) +{ + wiphy_lock(&rdev->wiphy); + __cfg80211_background_cac_event(rdev, rdev->background_radar_wdev, + chandef, event); + wiphy_unlock(&rdev->wiphy); +} + +void cfg80211_background_cac_done_wk(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct cfg80211_registered_device *rdev; + + rdev = container_of(delayed_work, struct cfg80211_registered_device, + background_cac_done_wk); + cfg80211_background_cac_event(rdev, &rdev->background_radar_chandef, + NL80211_RADAR_CAC_FINISHED); +} + +void cfg80211_background_cac_abort_wk(struct work_struct *work) +{ + struct cfg80211_registered_device *rdev; + + rdev = container_of(work, struct cfg80211_registered_device, + background_cac_abort_wk); + cfg80211_background_cac_event(rdev, &rdev->background_radar_chandef, + NL80211_RADAR_CAC_ABORTED); +} + +void cfg80211_background_cac_abort(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + + queue_work(cfg80211_wq, &rdev->background_cac_abort_wk); +} +EXPORT_SYMBOL(cfg80211_background_cac_abort); + +int +cfg80211_start_background_radar_detection(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, + struct cfg80211_chan_def *chandef) +{ + unsigned int cac_time_ms; + int err; + + lockdep_assert_wiphy(&rdev->wiphy); + + if (!wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_RADAR_BACKGROUND)) + return -EOPNOTSUPP; + + /* Offchannel chain already locked by another wdev */ + if (rdev->background_radar_wdev && rdev->background_radar_wdev != wdev) + return -EBUSY; + + /* CAC already in progress on the offchannel chain */ + if (rdev->background_radar_wdev == wdev && + delayed_work_pending(&rdev->background_cac_done_wk)) + return -EBUSY; + + err = rdev_set_radar_background(rdev, chandef); + if (err) + return err; + + cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, chandef); + if (!cac_time_ms) + cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; + + rdev->background_radar_chandef = *chandef; + rdev->background_radar_wdev = wdev; /* Get offchain ownership */ + + __cfg80211_background_cac_event(rdev, wdev, chandef, + NL80211_RADAR_CAC_STARTED); + queue_delayed_work(cfg80211_wq, &rdev->background_cac_done_wk, + msecs_to_jiffies(cac_time_ms)); + + return 0; +} + +void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev) +{ + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + + lockdep_assert_wiphy(wiphy); + + if (wdev != rdev->background_radar_wdev) + return; + + rdev_set_radar_background(rdev, NULL); + rdev->background_radar_wdev = NULL; /* Release offchain ownership */ + + __cfg80211_background_cac_event(rdev, wdev, + &rdev->background_radar_chandef, + NL80211_RADAR_CAC_ABORTED); +} diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a27b3b5fa210..578bff9c378b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -776,6 +776,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MBSSID_CONFIG] = NLA_POLICY_NESTED(nl80211_mbssid_config_policy), [NL80211_ATTR_MBSSID_ELEMS] = { .type = NLA_NESTED }, + [NL80211_ATTR_RADAR_BACKGROUND] = { .type = NLA_FLAG }, + [NL80211_ATTR_AP_SETTINGS_FLAGS] = { .type = NLA_U32 }, }; /* policy for 
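The background-CAC plumbing above uses one delayed work item as the CAC timer: queue_delayed_work() arms it when detection starts, the handler recovers its owner with to_delayed_work()/container_of(), and cancel_delayed_work() doubles as the "was a CAC still pending" test on abort. A condensed sketch of that pattern (struct demo_dev and the demo_* callbacks are hypothetical):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_dev {
        struct delayed_work cac_done_wk;
        unsigned int cac_time_ms;
};

static void demo_cac_done_wk(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct demo_dev *dev = container_of(dwork, struct demo_dev,
                                            cac_done_wk);

        /* CAC ran to completion: mark the channel usable, notify, etc. */
        (void)dev;
}

static void demo_cac_init(struct demo_dev *dev)
{
        INIT_DELAYED_WORK(&dev->cac_done_wk, demo_cac_done_wk);
}

static void demo_cac_start(struct demo_dev *dev)
{
        queue_delayed_work(system_wq, &dev->cac_done_wk,
                           msecs_to_jiffies(dev->cac_time_ms));
}

static void demo_cac_abort(struct demo_dev *dev)
{
        /* Returns false if the work already ran (or was never queued),
         * i.e. there is no pending CAC left to abort. */
        if (!cancel_delayed_work(&dev->cac_done_wk))
                return;

        /* ...emit the CAC-aborted notification here... */
}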
the key attributes */ @@ -3669,14 +3671,16 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_ADHOC: { - const u8 *ssid_ie; + const struct element *ssid_elem; + if (!wdev->current_bss) break; rcu_read_lock(); - ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, - WLAN_EID_SSID); - if (ssid_ie && - nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2)) + ssid_elem = ieee80211_bss_get_elem(&wdev->current_bss->pub, + WLAN_EID_SSID); + if (ssid_elem && + nla_put(msg, NL80211_ATTR_SSID, ssid_elem->datalen, + ssid_elem->data)) goto nla_put_failure_rcu_locked; rcu_read_unlock(); break; @@ -5711,8 +5715,11 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) nl80211_calculate_ap_params(params); - if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT]) - params->flags |= AP_SETTINGS_EXTERNAL_AUTH_SUPPORT; + if (info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS]) + params->flags = nla_get_u32( + info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS]); + else if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT]) + params->flags |= NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT; wdev_lock(wdev); err = rdev_start_ap(rdev, dev, params); @@ -9274,38 +9281,60 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, struct cfg80211_chan_def chandef; enum nl80211_dfs_regions dfs_region; unsigned int cac_time_ms; - int err; + int err = -EINVAL; + + flush_delayed_work(&rdev->dfs_update_channels_wk); + + wiphy_lock(wiphy); dfs_region = reg_get_dfs_region(wiphy); if (dfs_region == NL80211_DFS_UNSET) - return -EINVAL; + goto unlock; err = nl80211_parse_chandef(rdev, info, &chandef); if (err) - return err; - - if (netif_carrier_ok(dev)) - return -EBUSY; - - if (wdev->cac_started) - return -EBUSY; + goto unlock; err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype); if (err < 0) - return err; + goto unlock; - if (err == 0) - return -EINVAL; + if (err == 0) { + err = -EINVAL; + goto unlock; + } - if (!cfg80211_chandef_dfs_usable(wiphy, &chandef)) - return -EINVAL; + if (!cfg80211_chandef_dfs_usable(wiphy, &chandef)) { + err = -EINVAL; + goto unlock; + } + + if (nla_get_flag(info->attrs[NL80211_ATTR_RADAR_BACKGROUND])) { + err = cfg80211_start_background_radar_detection(rdev, wdev, + &chandef); + goto unlock; + } + + if (netif_carrier_ok(dev)) { + err = -EBUSY; + goto unlock; + } + + if (wdev->cac_started) { + err = -EBUSY; + goto unlock; + } /* CAC start is offloaded to HW and can't be started manually */ - if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) - return -EOPNOTSUPP; + if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) { + err = -EOPNOTSUPP; + goto unlock; + } - if (!rdev->ops->start_radar_detection) - return -EOPNOTSUPP; + if (!rdev->ops->start_radar_detection) { + err = -EOPNOTSUPP; + goto unlock; + } cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef); if (WARN_ON(!cac_time_ms)) @@ -9318,6 +9347,9 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, wdev->cac_start_time = jiffies; wdev->cac_time_ms = cac_time_ms; } +unlock: + wiphy_unlock(wiphy); + return err; } @@ -15954,7 +15986,8 @@ static const struct genl_small_ops nl80211_small_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_start_radar_detection, .flags = GENL_UNS_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV_UP, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NO_WIPHY_MTX, 
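nl80211_start_radar_detection() above now takes the wiphy mutex itself (the op is switched to NL80211_FLAG_NO_WIPHY_MTX), so every early return becomes a goto to a single unlock label. The shape of that conversion, reduced to a skeleton with made-up demo_* helpers:

#include <linux/types.h>
#include <linux/errno.h>

struct demo_wiphy;
void demo_wiphy_lock(struct demo_wiphy *wiphy);
void demo_wiphy_unlock(struct demo_wiphy *wiphy);
bool demo_region_is_set(struct demo_wiphy *wiphy);
int demo_parse_chandef(struct demo_wiphy *wiphy);
int demo_start_background_detection(struct demo_wiphy *wiphy);
bool demo_busy(struct demo_wiphy *wiphy);
int demo_start_hw_detection(struct demo_wiphy *wiphy);

static int demo_start_detection(struct demo_wiphy *wiphy, bool background)
{
        int err = -EINVAL;

        demo_wiphy_lock(wiphy);

        if (!demo_region_is_set(wiphy))
                goto unlock;                    /* err stays -EINVAL */

        err = demo_parse_chandef(wiphy);
        if (err)
                goto unlock;

        if (background) {
                /* The offchannel chain has its own entry point. */
                err = demo_start_background_detection(wiphy);
                goto unlock;
        }

        if (demo_busy(wiphy)) {
                err = -EBUSY;
                goto unlock;
        }

        err = demo_start_hw_detection(wiphy);
unlock:
        demo_wiphy_unlock(wiphy);
        return err;
}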
}, { .cmd = NL80211_CMD_GET_PROTOCOL_FEATURES, @@ -17035,6 +17068,44 @@ static void nl80211_send_remain_on_chan_event( nlmsg_free(msg); } +void cfg80211_assoc_comeback(struct net_device *netdev, + struct cfg80211_bss *bss, u32 timeout) +{ + struct wireless_dev *wdev = netdev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + struct sk_buff *msg; + void *hdr; + + trace_cfg80211_assoc_comeback(wdev, bss->bssid, timeout); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ASSOC_COMEBACK); + if (!hdr) { + nlmsg_free(msg); + return; + } + + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bss->bssid) || + nla_put_u32(msg, NL80211_ATTR_TIMEOUT, timeout)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, GFP_KERNEL); + return; + + nla_put_failure: + nlmsg_free(msg); +} +EXPORT_SYMBOL(cfg80211_assoc_comeback); + void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, unsigned int duration, gfp_t gfp) diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index cc1efec4b27b..439bcf52369c 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -1395,4 +1395,21 @@ rdev_set_fils_aad(struct cfg80211_registered_device *rdev, return ret; } +static inline int +rdev_set_radar_background(struct cfg80211_registered_device *rdev, + struct cfg80211_chan_def *chandef) +{ + struct wiphy *wiphy = &rdev->wiphy; + int ret; + + if (!rdev->ops->set_radar_background) + return -EOPNOTSUPP; + + trace_rdev_set_radar_background(wiphy, chandef); + ret = rdev->ops->set_radar_background(wiphy, chandef); + trace_rdev_return_int(wiphy, ret); + + return ret; +} + #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/reg.c b/net/wireless/reg.c index f8f01a3e020b..ec25924a1c26 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2371,6 +2371,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_MESH_POINT: if (!wdev->beacon_interval) goto wdev_inactive_unlock; chandef = wdev->chandef; @@ -2409,6 +2410,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: wiphy_lock(wiphy); ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype); wiphy_unlock(wiphy); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 22e92be61938..b888522f133b 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -406,22 +406,20 @@ static int cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss, struct cfg80211_bss *nontrans_bss) { - const u8 *ssid; - size_t ssid_len; + const struct element *ssid_elem; struct cfg80211_bss *bss = NULL; rcu_read_lock(); - ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID); - if (!ssid) { + ssid_elem = ieee80211_bss_get_elem(nontrans_bss, WLAN_EID_SSID); + if (!ssid_elem) { rcu_read_unlock(); return -EINVAL; } - ssid_len = ssid[1]; - ssid = ssid + 2; /* check if nontrans_bss is in the list */ list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) { - if (is_bss(bss, nontrans_bss->bssid, ssid, 
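cfg80211_assoc_comeback() above follows the stock shape of a generic netlink notification: allocate a message, put a command header, add attributes, finalize, multicast to a group, and free the buffer only on failure. A generic sketch of that sequence; demo_fam and the DEMO_* numbers are invented, not nl80211's:

#include <net/genetlink.h>

extern struct genl_family demo_fam;     /* assumed registered elsewhere */

#define DEMO_CMD_EVENT          1
#define DEMO_ATTR_IFINDEX       1
#define DEMO_ATTR_TIMEOUT       2
#define DEMO_MCGRP              0

static void demo_notify(struct net *net, int ifindex, u32 timeout)
{
        struct sk_buff *msg;
        void *hdr;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return;

        hdr = genlmsg_put(msg, 0, 0, &demo_fam, 0, DEMO_CMD_EVENT);
        if (!hdr)
                goto free;

        if (nla_put_u32(msg, DEMO_ATTR_IFINDEX, ifindex) ||
            nla_put_u32(msg, DEMO_ATTR_TIMEOUT, timeout))
                goto free;

        genlmsg_end(msg, hdr);
        genlmsg_multicast_netns(&demo_fam, net, msg, 0, DEMO_MCGRP,
                                GFP_KERNEL);
        return;

free:
        nlmsg_free(msg);
}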
ssid_len)) { + if (is_bss(bss, nontrans_bss->bssid, ssid_elem->data, + ssid_elem->datalen)) { rcu_read_unlock(); return 0; } @@ -1795,33 +1793,52 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, } int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen, - enum nl80211_band band) + enum nl80211_band band, + enum cfg80211_bss_frame_type ftype) { - const u8 *tmp; - int channel_number = -1; + const struct element *tmp; + + if (band == NL80211_BAND_6GHZ) { + struct ieee80211_he_operation *he_oper; - if (band == NL80211_BAND_S1GHZ) { - tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen); - if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) { - struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2); + tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, + ielen); + if (tmp && tmp->datalen >= sizeof(*he_oper) && + tmp->datalen >= ieee80211_he_oper_size(&tmp->data[1])) { + const struct ieee80211_he_6ghz_oper *he_6ghz_oper; + + he_oper = (void *)&tmp->data[1]; + + he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper); + if (!he_6ghz_oper) + return -1; - channel_number = s1gop->primary_ch; + if (ftype != CFG80211_BSS_FTYPE_BEACON || + he_6ghz_oper->control & IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON) + return he_6ghz_oper->primary; + } + } else if (band == NL80211_BAND_S1GHZ) { + tmp = cfg80211_find_elem(WLAN_EID_S1G_OPERATION, ie, ielen); + if (tmp && tmp->datalen >= sizeof(struct ieee80211_s1g_oper_ie)) { + struct ieee80211_s1g_oper_ie *s1gop = (void *)tmp->data; + + return s1gop->primary_ch; } } else { - tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen); - if (tmp && tmp[1] == 1) { - channel_number = tmp[2]; - } else { - tmp = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ie, ielen); - if (tmp && tmp[1] >= sizeof(struct ieee80211_ht_operation)) { - struct ieee80211_ht_operation *htop = (void *)(tmp + 2); + tmp = cfg80211_find_elem(WLAN_EID_DS_PARAMS, ie, ielen); + if (tmp && tmp->datalen == 1) + return tmp->data[0]; - channel_number = htop->primary_chan; - } + tmp = cfg80211_find_elem(WLAN_EID_HT_OPERATION, ie, ielen); + if (tmp && + tmp->datalen >= sizeof(struct ieee80211_ht_operation)) { + struct ieee80211_ht_operation *htop = (void *)tmp->data; + + return htop->primary_chan; } } - return channel_number; + return -1; } EXPORT_SYMBOL(cfg80211_get_ies_channel_number); @@ -1831,18 +1848,20 @@ EXPORT_SYMBOL(cfg80211_get_ies_channel_number); * from neighboring channels and the Beacon frames use the DSSS Parameter Set * element to indicate the current (transmitting) channel, but this might also * be needed on other bands if RX frequency does not match with the actual - * operating channel of a BSS. + * operating channel of a BSS, or if the AP reports a different primary channel. 
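The scan.c changes above are part of a wider conversion in this series from raw IE pointers, where ie[1] is the length and ie + 2 the payload, to the struct element view with named ->datalen and ->data fields. Both styles side by side, assuming the caller already has an ies buffer of ielen bytes:

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

/* Old style: magic offsets into the raw buffer. */
static int demo_ds_chan_ie(const u8 *ies, int ielen)
{
        const u8 *ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ies, ielen);

        if (ie && ie[1] == 1)           /* ie[0] = id, ie[1] = length */
                return ie[2];
        return -1;
}

/* New style: the same lookup through struct element. */
static int demo_ds_chan_elem(const u8 *ies, int ielen)
{
        const struct element *elem =
                cfg80211_find_elem(WLAN_EID_DS_PARAMS, ies, ielen);

        if (elem && elem->datalen == 1)
                return elem->data[0];
        return -1;
}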
*/ static struct ieee80211_channel * cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, struct ieee80211_channel *channel, - enum nl80211_bss_scan_width scan_width) + enum nl80211_bss_scan_width scan_width, + enum cfg80211_bss_frame_type ftype) { u32 freq; int channel_number; struct ieee80211_channel *alt_channel; - channel_number = cfg80211_get_ies_channel_number(ie, ielen, channel->band); + channel_number = cfg80211_get_ies_channel_number(ie, ielen, + channel->band, ftype); if (channel_number < 0) { /* No channel information in frame payload */ @@ -1850,6 +1869,16 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, } freq = ieee80211_channel_to_freq_khz(channel_number, channel->band); + + /* + * In 6GHz, duplicated beacon indication is relevant for + * beacons only. + */ + if (channel->band == NL80211_BAND_6GHZ && + (freq == channel->center_freq || + abs(freq - channel->center_freq) > 80)) + return channel; + alt_channel = ieee80211_get_channel_khz(wiphy, freq); if (!alt_channel) { if (channel->band == NL80211_BAND_2GHZ) { @@ -1911,7 +1940,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, return NULL; channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan, - data->scan_width); + data->scan_width, ftype); if (!channel) return NULL; @@ -2234,7 +2263,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, struct ieee80211_mgmt *mgmt, size_t len) { u8 *ie, *new_ie, *pos; - const u8 *nontrans_ssid, *trans_ssid, *mbssid; + const struct element *nontrans_ssid; + const u8 *trans_ssid, *mbssid; size_t ielen = len - offsetof(struct ieee80211_mgmt, u.probe_resp.variable); size_t new_ie_len; @@ -2261,11 +2291,11 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, return; new_ie_len -= mbssid[1]; - nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID); + nontrans_ssid = ieee80211_bss_get_elem(nontrans_bss, WLAN_EID_SSID); if (!nontrans_ssid) return; - new_ie_len += nontrans_ssid[1]; + new_ie_len += nontrans_ssid->datalen; /* generate new ie for nontrans BSS * 1. 
replace SSID with nontrans BSS' SSID @@ -2282,7 +2312,7 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, pos = new_ie; /* copy the nontransmitted SSID */ - cpy_len = nontrans_ssid[1] + 2; + cpy_len = nontrans_ssid->datalen + 2; memcpy(pos, nontrans_ssid, cpy_len); pos += cpy_len; /* copy the IEs between SSID and MBSSID */ @@ -2333,6 +2363,7 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, size_t ielen, min_hdr_len = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); int bss_type; + enum cfg80211_bss_frame_type ftype; BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) != offsetof(struct ieee80211_mgmt, u.beacon.variable)); @@ -2369,8 +2400,16 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, variable = ext->u.s1g_beacon.variable; } + if (ieee80211_is_beacon(mgmt->frame_control)) + ftype = CFG80211_BSS_FTYPE_BEACON; + else if (ieee80211_is_probe_resp(mgmt->frame_control)) + ftype = CFG80211_BSS_FTYPE_PRESP; + else + ftype = CFG80211_BSS_FTYPE_UNKNOWN; + channel = cfg80211_get_bss_channel(wiphy, variable, - ielen, data->chan, data->scan_width); + ielen, data->chan, data->scan_width, + ftype); if (!channel) return NULL; @@ -2687,7 +2726,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, struct cfg80211_registered_device *rdev; struct wiphy *wiphy; struct iw_scan_req *wreq = NULL; - struct cfg80211_scan_request *creq = NULL; + struct cfg80211_scan_request *creq; int i, err, n_channels = 0; enum nl80211_band band; @@ -2702,10 +2741,8 @@ int cfg80211_wext_siwscan(struct net_device *dev, if (IS_ERR(rdev)) return PTR_ERR(rdev); - if (rdev->scan_req || rdev->scan_msg) { - err = -EBUSY; - goto out; - } + if (rdev->scan_req || rdev->scan_msg) + return -EBUSY; wiphy = &rdev->wiphy; @@ -2718,10 +2755,8 @@ int cfg80211_wext_siwscan(struct net_device *dev, creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + n_channels * sizeof(void *), GFP_ATOMIC); - if (!creq) { - err = -ENOMEM; - goto out; - } + if (!creq) + return -ENOMEM; creq->wiphy = wiphy; creq->wdev = dev->ieee80211_ptr; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 08a70b4f090c..ff4d48fcbfb2 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -680,7 +680,9 @@ void __cfg80211_connect_result(struct net_device *dev, bool wextev) { struct wireless_dev *wdev = dev->ieee80211_ptr; - const u8 *country_ie; + const struct element *country_elem; + const u8 *country_data; + u8 country_datalen; #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif @@ -762,26 +764,22 @@ void __cfg80211_connect_result(struct net_device *dev, cfg80211_upload_connect_keys(wdev); rcu_read_lock(); - country_ie = ieee80211_bss_get_ie(cr->bss, WLAN_EID_COUNTRY); - if (!country_ie) { + country_elem = ieee80211_bss_get_elem(cr->bss, WLAN_EID_COUNTRY); + if (!country_elem) { rcu_read_unlock(); return; } - country_ie = kmemdup(country_ie, 2 + country_ie[1], GFP_ATOMIC); + country_datalen = country_elem->datalen; + country_data = kmemdup(country_elem->data, country_datalen, GFP_ATOMIC); rcu_read_unlock(); - if (!country_ie) + if (!country_data) return; - /* - * ieee80211_bss_get_ie() ensures we can access: - * - country_ie + 2, the start of the country ie data, and - * - and country_ie[1] which is the IE length - */ regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band, - country_ie + 2, country_ie[1]); - kfree(country_ie); + country_data, country_datalen); + kfree(country_data); } /* Consumes bss object one way or another */ diff --git a/net/wireless/trace.h b/net/wireless/trace.h 
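The sme.c change above keeps the usual rule for BSS IEs: they are RCU protected, so the country element's payload is duplicated with kmemdup() while rcu_read_lock() is held and only the private copy is used after the unlock. A compact sketch of that copy-out pattern, with demo_get_elem() standing in for ieee80211_bss_get_elem():

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/ieee80211.h>

const struct element *demo_get_elem(void);      /* hypothetical accessor */

static void demo_use_country(void)
{
        const struct element *elem;
        u8 *copy;
        u8 len;

        rcu_read_lock();
        elem = demo_get_elem();
        if (!elem) {
                rcu_read_unlock();
                return;
        }
        /* GFP_ATOMIC: no sleeping inside the RCU read-side section. */
        len = elem->datalen;
        copy = kmemdup(elem->data, len, GFP_ATOMIC);
        rcu_read_unlock();

        if (!copy)
                return;

        /* ...use (copy, len) outside the RCU section... */
        kfree(copy);
}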
index ad6c16a06bcb..228079d7690a 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -3053,18 +3053,21 @@ TRACE_EVENT(cfg80211_ch_switch_started_notify, ); TRACE_EVENT(cfg80211_radar_event, - TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef), - TP_ARGS(wiphy, chandef), + TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, + bool offchan), + TP_ARGS(wiphy, chandef, offchan), TP_STRUCT__entry( WIPHY_ENTRY CHAN_DEF_ENTRY + __field(bool, offchan) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_DEF_ASSIGN(chandef); + __entry->offchan = offchan; ), - TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT, - WIPHY_PR_ARG, CHAN_DEF_PR_ARG) + TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", offchan %d", + WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->offchan) ); TRACE_EVENT(cfg80211_cac_event, @@ -3674,6 +3677,42 @@ TRACE_EVENT(cfg80211_bss_color_notify, __entry->color_bitmap) ); +TRACE_EVENT(rdev_set_radar_background, + TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef), + + TP_ARGS(wiphy, chandef), + + TP_STRUCT__entry( + WIPHY_ENTRY + CHAN_DEF_ENTRY + ), + + TP_fast_assign( + WIPHY_ASSIGN; + CHAN_DEF_ASSIGN(chandef) + ), + + TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT, + WIPHY_PR_ARG, CHAN_DEF_PR_ARG) +); + +TRACE_EVENT(cfg80211_assoc_comeback, + TP_PROTO(struct wireless_dev *wdev, const u8 *bssid, u32 timeout), + TP_ARGS(wdev, bssid, timeout), + TP_STRUCT__entry( + WDEV_ENTRY + MAC_ENTRY(bssid) + __field(u32, timeout) + ), + TP_fast_assign( + WDEV_ASSIGN; + MAC_ASSIGN(bssid, bssid); + __entry->timeout = timeout; + ), + TP_printk(WDEV_PR_FMT ", " MAC_PR_FMT ", timeout: %u TUs", + WDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->timeout) +); + #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index 193a18a53142..cd09a9042261 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -212,18 +212,18 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev, wdev_lock(wdev); if (wdev->current_bss) { - const u8 *ie; + const struct element *ssid_elem; rcu_read_lock(); - ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, - WLAN_EID_SSID); - if (ie) { + ssid_elem = ieee80211_bss_get_elem(&wdev->current_bss->pub, + WLAN_EID_SSID); + if (ssid_elem) { data->flags = 1; - data->length = ie[1]; + data->length = ssid_elem->datalen; if (data->length > IW_ESSID_MAX_SIZE) ret = -EINVAL; else - memcpy(ssid, ie + 2, data->length); + memcpy(ssid, ssid_elem->data, data->length); } rcu_read_unlock(); } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) { diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index e1c4197af468..b981a4828d08 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -41,7 +41,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) return 0; } - if (!more && x25->fraglen > 0) { /* End of fragment */ + if (x25->fraglen > 0) { /* End of fragment */ int len = x25->fraglen + skb->len; if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL){ diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 7a466ea962c5..e3d35850fdea 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -794,9 +794,7 @@ static int xsk_release(struct socket *sock) sk_del_node_init_rcu(sk); mutex_unlock(&net->xdp.lock); - local_bh_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); - local_bh_enable(); xsk_delete_from_maps(xs); mutex_lock(&xs->mutex); @@ -1396,9 +1394,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, sk_add_node_rcu(sk, &net->xdp.list); 
mutex_unlock(&net->xdp.lock); - local_bh_disable(); sock_prot_inuse_add(net, &xsk_proto, 1); - local_bh_enable(); return 0; } diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index e843b0d9e2a6..3fa066419d37 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -259,6 +259,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, } xso->dev = dev; + netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC); xso->real_dev = dev; xso->num_exthdrs = 1; xso->flags = xuo->flags; @@ -269,7 +270,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, xso->flags = 0; xso->dev = NULL; xso->real_dev = NULL; - dev_put(dev); + dev_put_track(dev, &xso->dev_tracker); if (err != -EOPNOTSUPP) return err;
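The xfrm_device hunk above attaches a tracker to the offload's netdev reference: netdev_tracker_alloc() registers the reference that was already taken, and the release goes through dev_put_track() so an imbalance can be attributed to its caller rather than showing up as an anonymous refcount leak. A sketch of the hold/put pairing for a hypothetical private struct (field names are illustrative):

#include <linux/netdevice.h>

struct demo_priv {
        struct net_device *dev;
        netdev_tracker dev_tracker;     /* empty unless tracking is enabled */
};

static void demo_bind(struct demo_priv *p, struct net_device *dev)
{
        /* Takes the device reference and records this tracker as its owner. */
        dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
        p->dev = dev;
}

static void demo_unbind(struct demo_priv *p)
{
        /* Releases the reference registered above. */
        dev_put_track(p->dev, &p->dev_tracker);
        p->dev = NULL;
}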