Diffstat (limited to 'net')
109 files changed, 8421 insertions, 4601 deletions
diff --git a/net/802/hippi.c b/net/802/hippi.c index 887e73d520e4..1997b7dd265e 100644 --- a/net/802/hippi.c +++ b/net/802/hippi.c @@ -65,7 +65,7 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev, hip->le.src_addr_type = 2; /* 12 bit SC address */ memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3); - memset(&hip->le.reserved, 0, 16); + memset_startat(&hip->le, 0, reserved); hip->snap.dsap = HIPPI_EXTENDED_SAP; hip->snap.ssap = HIPPI_EXTENDED_SAP; diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index abaa5d96ded2..788076b002b3 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -319,8 +319,8 @@ static void vlan_transfer_features(struct net_device *dev, { struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); - vlandev->gso_max_size = dev->gso_max_size; - vlandev->gso_max_segs = dev->gso_max_segs; + netif_set_gso_max_size(vlandev, dev->gso_max_size); + netif_set_gso_max_segs(vlandev, dev->gso_max_segs); if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto)) vlandev->hard_header_len = dev->hard_header_len; diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 59bc13b5f14f..acf8c791f320 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -476,10 +476,9 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head, type = vhdr->h_vlan_encapsulated_proto; - rcu_read_lock(); ptype = gro_find_receive_by_type(type); if (!ptype) - goto out_unlock; + goto out; flush = 0; @@ -501,8 +500,6 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head, ipv6_gro_receive, inet_gro_receive, head, skb); -out_unlock: - rcu_read_unlock(); out: skb_gro_flush_final(skb, pp, flush); @@ -516,14 +513,12 @@ static int vlan_gro_complete(struct sk_buff *skb, int nhoff) struct packet_offload *ptype; int err = -ENOENT; - rcu_read_lock(); ptype = gro_find_complete_by_type(type); if (ptype) err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, ipv6_gro_complete, inet_gro_complete, skb, nhoff + sizeof(*vhdr)); - rcu_read_unlock(); return err; } diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index a54535cbcf4c..866556e041b7 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -573,8 +573,8 @@ static int vlan_dev_init(struct net_device *dev) NETIF_F_ALL_FCOE; dev->features |= dev->hw_features | NETIF_F_LLTX; - dev->gso_max_size = real_dev->gso_max_size; - dev->gso_max_segs = real_dev->gso_max_segs; + netif_set_gso_max_size(dev, real_dev->gso_max_size); + netif_set_gso_max_segs(dev, real_dev->gso_max_segs); if (dev->features & NETIF_F_VLAN_FEATURES) netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n"); diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index ec87dea23719..08bf6c839e25 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -252,7 +252,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset) stats = dev_get_stats(vlandev, &temp); seq_printf(seq, - "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n", + "%s VID: %d REORDER_HDR: %i dev->priv_flags: %llx\n", vlandev->name, vlan->vlan_id, (int)(vlan->flags & 1), vlandev->priv_flags); diff --git a/net/Kconfig b/net/Kconfig index 074472dfa94a..8a1f9d0287de 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -455,4 +455,9 @@ config ETHTOOL_NETLINK netlink. It provides better extensibility and some new features, e.g. notification messages. 
+config NETDEV_ADDR_LIST_TEST + tristate "Unit tests for device address list" + default KUNIT_ALL_TESTS + depends on KUNIT + endif # if NET diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 291770fc9551..a52bba8500e1 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -15,7 +15,7 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \ - eir.o + eir.o hci_sync.o bluetooth-$(CONFIG_BT_BREDR) += sco.o bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o diff --git a/net/bluetooth/aosp.c b/net/bluetooth/aosp.c index a1b7762335a5..432ae3aac9e3 100644 --- a/net/bluetooth/aosp.c +++ b/net/bluetooth/aosp.c @@ -8,9 +8,43 @@ #include "aosp.h" +/* Command complete parameters of LE_Get_Vendor_Capabilities_Command + * The parameters grow over time. The base version that declares the + * version_supported field is v0.95. Refer to + * https://cs.android.com/android/platform/superproject/+/master:system/ + * bt/gd/hci/controller.cc;l=452?q=le_get_vendor_capabilities_handler + */ +struct aosp_rp_le_get_vendor_capa { + /* v0.95: 15 octets */ + __u8 status; + __u8 max_advt_instances; + __u8 offloaded_resolution_of_private_address; + __le16 total_scan_results_storage; + __u8 max_irk_list_sz; + __u8 filtering_support; + __u8 max_filter; + __u8 activity_energy_info_support; + __le16 version_supported; + __le16 total_num_of_advt_tracked; + __u8 extended_scan_support; + __u8 debug_logging_supported; + /* v0.96: 16 octets */ + __u8 le_address_generation_offloading_support; + /* v0.98: 21 octets */ + __le32 a2dp_source_offload_capability_mask; + __u8 bluetooth_quality_report_support; + /* v1.00: 25 octets */ + __le32 dynamic_audio_buffer_support; +} __packed; + +#define VENDOR_CAPA_BASE_SIZE 15 +#define VENDOR_CAPA_0_98_SIZE 21 + void aosp_do_open(struct hci_dev *hdev) { struct sk_buff *skb; + struct aosp_rp_le_get_vendor_capa *rp; + u16 version_supported; if (!hdev->aosp_capable) return; @@ -20,9 +54,54 @@ void aosp_do_open(struct hci_dev *hdev) /* LE Get Vendor Capabilities Command */ skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x153), 0, NULL, HCI_CMD_TIMEOUT); - if (IS_ERR(skb)) + if (IS_ERR(skb)) { + bt_dev_err(hdev, "AOSP get vendor capabilities (%ld)", + PTR_ERR(skb)); return; + } + + /* A basic length check */ + if (skb->len < VENDOR_CAPA_BASE_SIZE) + goto length_error; + + rp = (struct aosp_rp_le_get_vendor_capa *)skb->data; + + version_supported = le16_to_cpu(rp->version_supported); + /* AOSP displays the verion number like v0.98, v1.00, etc. */ + bt_dev_info(hdev, "AOSP extensions version v%u.%02u", + version_supported >> 8, version_supported & 0xff); + + /* Do not support very old versions. */ + if (version_supported < 95) { + bt_dev_warn(hdev, "AOSP capabilities version %u too old", + version_supported); + goto done; + } + + if (version_supported < 98) { + bt_dev_warn(hdev, "AOSP quality report is not supported"); + goto done; + } + + if (skb->len < VENDOR_CAPA_0_98_SIZE) + goto length_error; + + /* The bluetooth_quality_report_support is defined at version + * v0.98. 
Refer to + * https://cs.android.com/android/platform/superproject/+/ + * master:system/bt/gd/hci/controller.cc;l=477 + */ + if (rp->bluetooth_quality_report_support) { + hdev->aosp_quality_report = true; + bt_dev_info(hdev, "AOSP quality report is supported"); + } + + goto done; + +length_error: + bt_dev_err(hdev, "AOSP capabilities length %d too short", skb->len); +done: kfree_skb(skb); } @@ -33,3 +112,90 @@ void aosp_do_close(struct hci_dev *hdev) bt_dev_dbg(hdev, "Cleanup of AOSP extension"); } + +/* BQR command */ +#define BQR_OPCODE hci_opcode_pack(0x3f, 0x015e) + +/* BQR report action */ +#define REPORT_ACTION_ADD 0x00 +#define REPORT_ACTION_DELETE 0x01 +#define REPORT_ACTION_CLEAR 0x02 + +/* BQR event masks */ +#define QUALITY_MONITORING BIT(0) +#define APPRAOCHING_LSTO BIT(1) +#define A2DP_AUDIO_CHOPPY BIT(2) +#define SCO_VOICE_CHOPPY BIT(3) + +#define DEFAULT_BQR_EVENT_MASK (QUALITY_MONITORING | APPRAOCHING_LSTO | \ + A2DP_AUDIO_CHOPPY | SCO_VOICE_CHOPPY) + +/* Reporting at milliseconds so as not to stress the controller too much. + * Range: 0 ~ 65535 ms + */ +#define DEFALUT_REPORT_INTERVAL_MS 5000 + +struct aosp_bqr_cp { + __u8 report_action; + __u32 event_mask; + __u16 min_report_interval; +} __packed; + +static int enable_quality_report(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct aosp_bqr_cp cp; + + cp.report_action = REPORT_ACTION_ADD; + cp.event_mask = DEFAULT_BQR_EVENT_MASK; + cp.min_report_interval = DEFALUT_REPORT_INTERVAL_MS; + + skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Enabling Android BQR failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + kfree_skb(skb); + return 0; +} + +static int disable_quality_report(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct aosp_bqr_cp cp = { 0 }; + + cp.report_action = REPORT_ACTION_CLEAR; + + skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Disabling Android BQR failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + kfree_skb(skb); + return 0; +} + +bool aosp_has_quality_report(struct hci_dev *hdev) +{ + return hdev->aosp_quality_report; +} + +int aosp_set_quality_report(struct hci_dev *hdev, bool enable) +{ + if (!aosp_has_quality_report(hdev)) + return -EOPNOTSUPP; + + bt_dev_dbg(hdev, "quality report enable %d", enable); + + /* Enable or disable the quality report feature. 
*/ + if (enable) + return enable_quality_report(hdev); + else + return disable_quality_report(hdev); +} diff --git a/net/bluetooth/aosp.h b/net/bluetooth/aosp.h index 328fc6d39f70..2fd8886d51b2 100644 --- a/net/bluetooth/aosp.h +++ b/net/bluetooth/aosp.h @@ -8,9 +8,22 @@ void aosp_do_open(struct hci_dev *hdev); void aosp_do_close(struct hci_dev *hdev); +bool aosp_has_quality_report(struct hci_dev *hdev); +int aosp_set_quality_report(struct hci_dev *hdev, bool enable); + #else static inline void aosp_do_open(struct hci_dev *hdev) {} static inline void aosp_do_close(struct hci_dev *hdev) {} +static inline bool aosp_has_quality_report(struct hci_dev *hdev) +{ + return false; +} + +static inline int aosp_set_quality_report(struct hci_dev *hdev, bool enable) +{ + return -EOPNOTSUPP; +} + #endif diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 0a2d78e811cf..83eb84e8e688 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -501,9 +501,7 @@ static int __init cmtp_init(void) { BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); - cmtp_init_sockets(); - - return 0; + return cmtp_init_sockets(); } static void __exit cmtp_exit(void) diff --git a/net/bluetooth/hci_codec.c b/net/bluetooth/hci_codec.c index f0421d0edaa3..38201532f58e 100644 --- a/net/bluetooth/hci_codec.c +++ b/net/bluetooth/hci_codec.c @@ -25,9 +25,11 @@ static int hci_codec_list_add(struct list_head *list, } entry->transport = sent->transport; entry->len = len; - entry->num_caps = rp->num_caps; - if (rp->num_caps) + entry->num_caps = 0; + if (rp) { + entry->num_caps = rp->num_caps; memcpy(entry->caps, caps, len); + } list_add(&entry->list, list); return 0; @@ -58,6 +60,18 @@ static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport, __u32 len; cmd->transport = i; + + /* If Read_Codec_Capabilities command is not supported + * then just add codec to the list without caps + */ + if (!(hdev->commands[45] & 0x08)) { + hci_dev_lock(hdev); + hci_codec_list_add(&hdev->local_codecs, cmd, + NULL, NULL, 0); + hci_dev_unlock(hdev); + continue; + } + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS, sizeof(*cmd), cmd, HCI_CMD_TIMEOUT); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index bd669c95b9a7..cd6e1cf7e396 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -108,7 +108,7 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn) break; } - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); } static void hci_conn_cleanup(struct hci_conn *conn) @@ -900,25 +900,15 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status) hci_conn_del(conn); - /* The suspend notifier is waiting for all devices to disconnect and an - * LE connect cancel will result in an hci_le_conn_failed. Once the last - * connection is deleted, we should also wake the suspend queue to - * complete suspend operations. - */ - if (list_empty(&hdev->conn_hash.list) && - test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) { - wake_up(&hdev->suspend_wait_q); - } - /* Since we may have temporarily stopped the background scanning in * favor of connection establishment, we should restart it. */ - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); - /* Re-enable advertising in case this was a failed connection + /* Enable advertising in case this was a failed connection * attempt as a peripheral. 
*/ - hci_req_reenable_advertising(hdev); + hci_enable_advertising(hdev); } static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) @@ -1411,7 +1401,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, conn->conn_timeout = conn_timeout; conn->conn_reason = conn_reason; - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); done: hci_conn_hold(conn); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8d33aa64846b..fdc0dcf8ee36 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -62,824 +62,6 @@ DEFINE_MUTEX(hci_cb_list_lock); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); -static int hci_reset_req(struct hci_request *req, unsigned long opt) -{ - BT_DBG("%s %ld", req->hdev->name, opt); - - /* Reset device */ - set_bit(HCI_RESET, &req->hdev->flags); - hci_req_add(req, HCI_OP_RESET, 0, NULL); - return 0; -} - -static void bredr_init(struct hci_request *req) -{ - req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; - - /* Read Local Supported Features */ - hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); - - /* Read Local Version */ - hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); - - /* Read BD Address */ - hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); -} - -static void amp_init1(struct hci_request *req) -{ - req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; - - /* Read Local Version */ - hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); - - /* Read Local Supported Commands */ - hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); - - /* Read Local AMP Info */ - hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); - - /* Read Data Blk size */ - hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); - - /* Read Flow Control Mode */ - hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL); - - /* Read Location Data */ - hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL); -} - -static int amp_init2(struct hci_request *req) -{ - /* Read Local Supported Features. Not all AMP controllers - * support this so it's placed conditionally in the second - * stage init. - */ - if (req->hdev->commands[14] & 0x20) - hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); - - return 0; -} - -static int hci_init1_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - BT_DBG("%s %ld", hdev->name, opt); - - /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) - hci_reset_req(req, 0); - - switch (hdev->dev_type) { - case HCI_PRIMARY: - bredr_init(req); - break; - case HCI_AMP: - amp_init1(req); - break; - default: - bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type); - break; - } - - return 0; -} - -static void bredr_setup(struct hci_request *req) -{ - __le16 param; - __u8 flt_type; - - /* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/ - hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL); - - /* Read Class of Device */ - hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); - - /* Read Local Name */ - hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL); - - /* Read Voice Setting */ - hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL); - - /* Read Number of Supported IAC */ - hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL); - - /* Read Current IAC LAP */ - hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL); - - /* Clear Event Filters */ - flt_type = HCI_FLT_CLEAR_ALL; - hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type); - - /* Connection accept timeout ~20 secs */ - param = cpu_to_le16(0x7d00); - hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); -} - -static void le_setup(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - - /* Read LE Buffer Size */ - hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); - - /* Read LE Local Supported Features */ - hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); - - /* Read LE Supported States */ - hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); - - /* LE-only controllers have LE implicitly enabled */ - if (!lmp_bredr_capable(hdev)) - hci_dev_set_flag(hdev, HCI_LE_ENABLED); -} - -static void hci_setup_event_mask(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - - /* The second byte is 0xff instead of 0x9f (two reserved bits - * disabled) since a Broadcom 1.2 dongle doesn't respond to the - * command otherwise. - */ - u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; - - /* CSR 1.1 dongles does not accept any bitfield so don't try to set - * any event mask for pre 1.2 devices. - */ - if (hdev->hci_ver < BLUETOOTH_VER_1_2) - return; - - if (lmp_bredr_capable(hdev)) { - events[4] |= 0x01; /* Flow Specification Complete */ - } else { - /* Use a different default for LE-only devices */ - memset(events, 0, sizeof(events)); - events[1] |= 0x20; /* Command Complete */ - events[1] |= 0x40; /* Command Status */ - events[1] |= 0x80; /* Hardware Error */ - - /* If the controller supports the Disconnect command, enable - * the corresponding event. In addition enable packet flow - * control related events. - */ - if (hdev->commands[0] & 0x20) { - events[0] |= 0x10; /* Disconnection Complete */ - events[2] |= 0x04; /* Number of Completed Packets */ - events[3] |= 0x02; /* Data Buffer Overflow */ - } - - /* If the controller supports the Read Remote Version - * Information command, enable the corresponding event. 
- */ - if (hdev->commands[2] & 0x80) - events[1] |= 0x08; /* Read Remote Version Information - * Complete - */ - - if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { - events[0] |= 0x80; /* Encryption Change */ - events[5] |= 0x80; /* Encryption Key Refresh Complete */ - } - } - - if (lmp_inq_rssi_capable(hdev) || - test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) - events[4] |= 0x02; /* Inquiry Result with RSSI */ - - if (lmp_ext_feat_capable(hdev)) - events[4] |= 0x04; /* Read Remote Extended Features Complete */ - - if (lmp_esco_capable(hdev)) { - events[5] |= 0x08; /* Synchronous Connection Complete */ - events[5] |= 0x10; /* Synchronous Connection Changed */ - } - - if (lmp_sniffsubr_capable(hdev)) - events[5] |= 0x20; /* Sniff Subrating */ - - if (lmp_pause_enc_capable(hdev)) - events[5] |= 0x80; /* Encryption Key Refresh Complete */ - - if (lmp_ext_inq_capable(hdev)) - events[5] |= 0x40; /* Extended Inquiry Result */ - - if (lmp_no_flush_capable(hdev)) - events[7] |= 0x01; /* Enhanced Flush Complete */ - - if (lmp_lsto_capable(hdev)) - events[6] |= 0x80; /* Link Supervision Timeout Changed */ - - if (lmp_ssp_capable(hdev)) { - events[6] |= 0x01; /* IO Capability Request */ - events[6] |= 0x02; /* IO Capability Response */ - events[6] |= 0x04; /* User Confirmation Request */ - events[6] |= 0x08; /* User Passkey Request */ - events[6] |= 0x10; /* Remote OOB Data Request */ - events[6] |= 0x20; /* Simple Pairing Complete */ - events[7] |= 0x04; /* User Passkey Notification */ - events[7] |= 0x08; /* Keypress Notification */ - events[7] |= 0x10; /* Remote Host Supported - * Features Notification - */ - } - - if (lmp_le_capable(hdev)) - events[7] |= 0x20; /* LE Meta-Event */ - - hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events); -} - -static int hci_init2_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - if (hdev->dev_type == HCI_AMP) - return amp_init2(req); - - if (lmp_bredr_capable(hdev)) - bredr_setup(req); - else - hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); - - if (lmp_le_capable(hdev)) - le_setup(req); - - /* All Bluetooth 1.2 and later controllers should support the - * HCI command for reading the local supported commands. - * - * Unfortunately some controllers indicate Bluetooth 1.2 support, - * but do not have support for this command. If that is the case, - * the driver can quirk the behavior and skip reading the local - * supported commands. - */ - if (hdev->hci_ver > BLUETOOTH_VER_1_1 && - !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) - hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); - - if (lmp_ssp_capable(hdev)) { - /* When SSP is available, then the host features page - * should also be available as well. However some - * controllers list the max_page as 0 as long as SSP - * has not been enabled. To achieve proper debugging - * output, force the minimum max_page to 1 at least. - */ - hdev->max_page = 0x01; - - if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { - u8 mode = 0x01; - - hci_req_add(req, HCI_OP_WRITE_SSP_MODE, - sizeof(mode), &mode); - } else { - struct hci_cp_write_eir cp; - - memset(hdev->eir, 0, sizeof(hdev->eir)); - memset(&cp, 0, sizeof(cp)); - - hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); - } - } - - if (lmp_inq_rssi_capable(hdev) || - test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) { - u8 mode; - - /* If Extended Inquiry Result events are supported, then - * they are clearly preferred over Inquiry Result with RSSI - * events. 
- */ - mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; - - hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); - } - - if (lmp_inq_tx_pwr_capable(hdev)) - hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); - - if (lmp_ext_feat_capable(hdev)) { - struct hci_cp_read_local_ext_features cp; - - cp.page = 0x01; - hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, - sizeof(cp), &cp); - } - - if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { - u8 enable = 1; - hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), - &enable); - } - - return 0; -} - -static void hci_setup_link_policy(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_def_link_policy cp; - u16 link_policy = 0; - - if (lmp_rswitch_capable(hdev)) - link_policy |= HCI_LP_RSWITCH; - if (lmp_hold_capable(hdev)) - link_policy |= HCI_LP_HOLD; - if (lmp_sniff_capable(hdev)) - link_policy |= HCI_LP_SNIFF; - if (lmp_park_capable(hdev)) - link_policy |= HCI_LP_PARK; - - cp.policy = cpu_to_le16(link_policy); - hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); -} - -static void hci_set_le_support(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_le_host_supported cp; - - /* LE-only devices do not support explicit enablement */ - if (!lmp_bredr_capable(hdev)) - return; - - memset(&cp, 0, sizeof(cp)); - - if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { - cp.le = 0x01; - cp.simul = 0x00; - } - - if (cp.le != lmp_host_le_capable(hdev)) - hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), - &cp); -} - -static void hci_set_event_mask_page_2(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; - bool changed = false; - - /* If Connectionless Peripheral Broadcast central role is supported - * enable all necessary events for it. - */ - if (lmp_cpb_central_capable(hdev)) { - events[1] |= 0x40; /* Triggered Clock Capture */ - events[1] |= 0x80; /* Synchronization Train Complete */ - events[2] |= 0x10; /* Peripheral Page Response Timeout */ - events[2] |= 0x20; /* CPB Channel Map Change */ - changed = true; - } - - /* If Connectionless Peripheral Broadcast peripheral role is supported - * enable all necessary events for it. - */ - if (lmp_cpb_peripheral_capable(hdev)) { - events[2] |= 0x01; /* Synchronization Train Received */ - events[2] |= 0x02; /* CPB Receive */ - events[2] |= 0x04; /* CPB Timeout */ - events[2] |= 0x08; /* Truncated Page Complete */ - changed = true; - } - - /* Enable Authenticated Payload Timeout Expired event if supported */ - if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { - events[2] |= 0x80; - changed = true; - } - - /* Some Broadcom based controllers indicate support for Set Event - * Mask Page 2 command, but then actually do not support it. Since - * the default value is all bits set to zero, the command is only - * required if the event mask has to be changed. In case no change - * to the event mask is needed, skip this command. 
- */ - if (changed) - hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, - sizeof(events), events); -} - -static int hci_init3_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - u8 p; - - hci_setup_event_mask(req); - - if (hdev->commands[6] & 0x20 && - !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) { - struct hci_cp_read_stored_link_key cp; - - bacpy(&cp.bdaddr, BDADDR_ANY); - cp.read_all = 0x01; - hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp); - } - - if (hdev->commands[5] & 0x10) - hci_setup_link_policy(req); - - if (hdev->commands[8] & 0x01) - hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); - - if (hdev->commands[18] & 0x04 && - !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) - hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL); - - /* Some older Broadcom based Bluetooth 1.2 controllers do not - * support the Read Page Scan Type command. Check support for - * this command in the bit mask of supported commands. - */ - if (hdev->commands[13] & 0x01) - hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL); - - if (lmp_le_capable(hdev)) { - u8 events[8]; - - memset(events, 0, sizeof(events)); - - if (hdev->le_features[0] & HCI_LE_ENCRYPTION) - events[0] |= 0x10; /* LE Long Term Key Request */ - - /* If controller supports the Connection Parameters Request - * Link Layer Procedure, enable the corresponding event. - */ - if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) - events[0] |= 0x20; /* LE Remote Connection - * Parameter Request - */ - - /* If the controller supports the Data Length Extension - * feature, enable the corresponding event. - */ - if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) - events[0] |= 0x40; /* LE Data Length Change */ - - /* If the controller supports LL Privacy feature, enable - * the corresponding event. - */ - if (hdev->le_features[0] & HCI_LE_LL_PRIVACY) - events[1] |= 0x02; /* LE Enhanced Connection - * Complete - */ - - /* If the controller supports Extended Scanner Filter - * Policies, enable the corresponding event. - */ - if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) - events[1] |= 0x04; /* LE Direct Advertising - * Report - */ - - /* If the controller supports Channel Selection Algorithm #2 - * feature, enable the corresponding event. - */ - if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) - events[2] |= 0x08; /* LE Channel Selection - * Algorithm - */ - - /* If the controller supports the LE Set Scan Enable command, - * enable the corresponding advertising report event. - */ - if (hdev->commands[26] & 0x08) - events[0] |= 0x02; /* LE Advertising Report */ - - /* If the controller supports the LE Create Connection - * command, enable the corresponding event. - */ - if (hdev->commands[26] & 0x10) - events[0] |= 0x01; /* LE Connection Complete */ - - /* If the controller supports the LE Connection Update - * command, enable the corresponding event. - */ - if (hdev->commands[27] & 0x04) - events[0] |= 0x04; /* LE Connection Update - * Complete - */ - - /* If the controller supports the LE Read Remote Used Features - * command, enable the corresponding event. - */ - if (hdev->commands[27] & 0x20) - events[0] |= 0x08; /* LE Read Remote Used - * Features Complete - */ - - /* If the controller supports the LE Read Local P-256 - * Public Key command, enable the corresponding event. 
- */ - if (hdev->commands[34] & 0x02) - events[0] |= 0x80; /* LE Read Local P-256 - * Public Key Complete - */ - - /* If the controller supports the LE Generate DHKey - * command, enable the corresponding event. - */ - if (hdev->commands[34] & 0x04) - events[1] |= 0x01; /* LE Generate DHKey Complete */ - - /* If the controller supports the LE Set Default PHY or - * LE Set PHY commands, enable the corresponding event. - */ - if (hdev->commands[35] & (0x20 | 0x40)) - events[1] |= 0x08; /* LE PHY Update Complete */ - - /* If the controller supports LE Set Extended Scan Parameters - * and LE Set Extended Scan Enable commands, enable the - * corresponding event. - */ - if (use_ext_scan(hdev)) - events[1] |= 0x10; /* LE Extended Advertising - * Report - */ - - /* If the controller supports the LE Extended Advertising - * command, enable the corresponding event. - */ - if (ext_adv_capable(hdev)) - events[2] |= 0x02; /* LE Advertising Set - * Terminated - */ - - hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), - events); - - /* Read LE Advertising Channel TX Power */ - if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { - /* HCI TS spec forbids mixing of legacy and extended - * advertising commands wherein READ_ADV_TX_POWER is - * also included. So do not call it if extended adv - * is supported otherwise controller will return - * COMMAND_DISALLOWED for extended commands. - */ - hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); - } - - if (hdev->commands[38] & 0x80) { - /* Read LE Min/Max Tx Power*/ - hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER, - 0, NULL); - } - - if (hdev->commands[26] & 0x40) { - /* Read LE Accept List Size */ - hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, - 0, NULL); - } - - if (hdev->commands[26] & 0x80) { - /* Clear LE Accept List */ - hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL); - } - - if (hdev->commands[34] & 0x40) { - /* Read LE Resolving List Size */ - hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE, - 0, NULL); - } - - if (hdev->commands[34] & 0x20) { - /* Clear LE Resolving List */ - hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL); - } - - if (hdev->commands[35] & 0x04) { - __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout); - - /* Set RPA timeout */ - hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2, - &rpa_timeout); - } - - if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { - /* Read LE Maximum Data Length */ - hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); - - /* Read LE Suggested Default Data Length */ - hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL); - } - - if (ext_adv_capable(hdev)) { - /* Read LE Number of Supported Advertising Sets */ - hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, - 0, NULL); - } - - hci_set_le_support(req); - } - - /* Read features beyond page 1 if available */ - for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { - struct hci_cp_read_local_ext_features cp; - - cp.page = p; - hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, - sizeof(cp), &cp); - } - - return 0; -} - -static int hci_init4_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - /* Some Broadcom based Bluetooth controllers do not support the - * Delete Stored Link Key command. They are clearly indicating its - * absence in the bit mask of supported commands. - * - * Check the supported commands and only if the command is marked - * as supported send it. 
If not supported assume that the controller - * does not have actual support for stored link keys which makes this - * command redundant anyway. - * - * Some controllers indicate that they support handling deleting - * stored link keys, but they don't. The quirk lets a driver - * just disable this command. - */ - if (hdev->commands[6] & 0x80 && - !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) { - struct hci_cp_delete_stored_link_key cp; - - bacpy(&cp.bdaddr, BDADDR_ANY); - cp.delete_all = 0x01; - hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, - sizeof(cp), &cp); - } - - /* Set event mask page 2 if the HCI command for it is supported */ - if (hdev->commands[22] & 0x04) - hci_set_event_mask_page_2(req); - - /* Read local pairing options if the HCI command is supported */ - if (hdev->commands[41] & 0x08) - hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL); - - /* Get MWS transport configuration if the HCI command is supported */ - if (hdev->commands[30] & 0x08) - hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL); - - /* Check for Synchronization Train support */ - if (lmp_sync_train_capable(hdev)) - hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); - - /* Enable Secure Connections if supported and configured */ - if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && - bredr_sc_enabled(hdev)) { - u8 support = 0x01; - - hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, - sizeof(support), &support); - } - - /* Set erroneous data reporting if supported to the wideband speech - * setting value - */ - if (hdev->commands[18] & 0x08 && - !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) { - bool enabled = hci_dev_test_flag(hdev, - HCI_WIDEBAND_SPEECH_ENABLED); - - if (enabled != - (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) { - struct hci_cp_write_def_err_data_reporting cp; - - cp.err_data_reporting = enabled ? - ERR_DATA_REPORTING_ENABLED : - ERR_DATA_REPORTING_DISABLED; - - hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, - sizeof(cp), &cp); - } - } - - /* Set Suggested Default Data Length to maximum if supported */ - if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { - struct hci_cp_le_write_def_data_len cp; - - cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); - cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); - hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp); - } - - /* Set Default PHY parameters if command is supported */ - if (hdev->commands[35] & 0x20) { - struct hci_cp_le_set_default_phy cp; - - cp.all_phys = 0x00; - cp.tx_phys = hdev->le_tx_def_phys; - cp.rx_phys = hdev->le_rx_def_phys; - - hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp); - } - - return 0; -} - -static int __hci_init(struct hci_dev *hdev) -{ - int err; - - err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - if (hci_dev_test_flag(hdev, HCI_SETUP)) - hci_debugfs_create_basic(hdev); - - err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode - * BR/EDR/LE type controllers. AMP controllers only need the - * first two stages of init. 
- */ - if (hdev->dev_type != HCI_PRIMARY) - return 0; - - err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - /* Read local codec list if the HCI command is supported */ - if (hdev->commands[45] & 0x04) - hci_read_supported_codecs_v2(hdev); - else if (hdev->commands[29] & 0x20) - hci_read_supported_codecs(hdev); - - /* This function is only called when the controller is actually in - * configured state. When the controller is marked as unconfigured, - * this initialization procedure is not run. - * - * It means that it is possible that a controller runs through its - * setup phase and then discovers missing settings. If that is the - * case, then this function will not be called. It then will only - * be called during the config phase. - * - * So only when in setup phase or config phase, create the debugfs - * entries and register the SMP channels. - */ - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) - return 0; - - hci_debugfs_create_common(hdev); - - if (lmp_bredr_capable(hdev)) - hci_debugfs_create_bredr(hdev); - - if (lmp_le_capable(hdev)) - hci_debugfs_create_le(hdev); - - return 0; -} - -static int hci_init0_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - BT_DBG("%s %ld", hdev->name, opt); - - /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) - hci_reset_req(req, 0); - - /* Read Local Version */ - hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); - - /* Read BD Address */ - if (hdev->set_bdaddr) - hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); - - return 0; -} - -static int __hci_unconf_init(struct hci_dev *hdev) -{ - int err; - - if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) - return 0; - - err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - if (hci_dev_test_flag(hdev, HCI_SETUP)) - hci_debugfs_create_basic(hdev); - - return 0; -} - static int hci_scan_req(struct hci_request *req, unsigned long opt) { __u8 scan = opt; @@ -975,7 +157,7 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state) switch (state) { case DISCOVERY_STOPPED: - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); if (old_state != DISCOVERY_STARTING) mgmt_discovering(hdev, 0); @@ -1289,32 +471,6 @@ done: return err; } -/** - * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address - * (BD_ADDR) for a HCI device from - * a firmware node property. - * @hdev: The HCI device - * - * Search the firmware node for 'local-bd-address'. - * - * All-zero BD addresses are rejected, because those could be properties - * that exist in the firmware tables, but were not updated by the firmware. For - * example, the DTS could define 'local-bd-address', with zero BD addresses. 
- */ -static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) -{ - struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); - bdaddr_t ba; - int ret; - - ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", - (u8 *)&ba, sizeof(ba)); - if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) - return; - - bacpy(&hdev->public_addr, &ba); -} - static int hci_dev_do_open(struct hci_dev *hdev) { int ret = 0; @@ -1323,205 +479,8 @@ static int hci_dev_do_open(struct hci_dev *hdev) hci_req_sync_lock(hdev); - if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { - ret = -ENODEV; - goto done; - } - - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) { - /* Check for rfkill but allow the HCI setup stage to - * proceed (which in itself doesn't cause any RF activity). - */ - if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { - ret = -ERFKILL; - goto done; - } - - /* Check for valid public address or a configured static - * random address, but let the HCI setup proceed to - * be able to determine if there is a public address - * or not. - * - * In case of user channel usage, it is not important - * if a public address or static random address is - * available. - * - * This check is only valid for BR/EDR controllers - * since AMP controllers do not have an address. - */ - if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hdev->dev_type == HCI_PRIMARY && - !bacmp(&hdev->bdaddr, BDADDR_ANY) && - !bacmp(&hdev->static_addr, BDADDR_ANY)) { - ret = -EADDRNOTAVAIL; - goto done; - } - } - - if (test_bit(HCI_UP, &hdev->flags)) { - ret = -EALREADY; - goto done; - } - - if (hdev->open(hdev)) { - ret = -EIO; - goto done; - } - - set_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_OPEN); - - atomic_set(&hdev->cmd_cnt, 1); - set_bit(HCI_INIT, &hdev->flags); - - if (hci_dev_test_flag(hdev, HCI_SETUP) || - test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { - bool invalid_bdaddr; - - hci_sock_dev_event(hdev, HCI_DEV_SETUP); - - if (hdev->setup) - ret = hdev->setup(hdev); - - /* The transport driver can set the quirk to mark the - * BD_ADDR invalid before creating the HCI device or in - * its setup callback. - */ - invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, - &hdev->quirks); - - if (ret) - goto setup_failed; - - if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) { - if (!bacmp(&hdev->public_addr, BDADDR_ANY)) - hci_dev_get_bd_addr_from_property(hdev); - - if (bacmp(&hdev->public_addr, BDADDR_ANY) && - hdev->set_bdaddr) { - ret = hdev->set_bdaddr(hdev, - &hdev->public_addr); - - /* If setting of the BD_ADDR from the device - * property succeeds, then treat the address - * as valid even if the invalid BD_ADDR - * quirk indicates otherwise. - */ - if (!ret) - invalid_bdaddr = false; - } - } - -setup_failed: - /* The transport driver can set these quirks before - * creating the HCI device or in its setup callback. - * - * For the invalid BD_ADDR quirk it is possible that - * it becomes a valid address if the bootloader does - * provide it (see above). - * - * In case any of them is set, the controller has to - * start up as unconfigured. - */ - if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || - invalid_bdaddr) - hci_dev_set_flag(hdev, HCI_UNCONFIGURED); - - /* For an unconfigured controller it is required to - * read at least the version information provided by - * the Read Local Version Information command. 
- * - * If the set_bdaddr driver callback is provided, then - * also the original Bluetooth public device address - * will be read using the Read BD Address command. - */ - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) - ret = __hci_unconf_init(hdev); - } - - if (hci_dev_test_flag(hdev, HCI_CONFIG)) { - /* If public address change is configured, ensure that - * the address gets programmed. If the driver does not - * support changing the public address, fail the power - * on procedure. - */ - if (bacmp(&hdev->public_addr, BDADDR_ANY) && - hdev->set_bdaddr) - ret = hdev->set_bdaddr(hdev, &hdev->public_addr); - else - ret = -EADDRNOTAVAIL; - } - - if (!ret) { - if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { - ret = __hci_init(hdev); - if (!ret && hdev->post_init) - ret = hdev->post_init(hdev); - } - } - - /* If the HCI Reset command is clearing all diagnostic settings, - * then they need to be reprogrammed after the init procedure - * completed. - */ - if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) - ret = hdev->set_diag(hdev, true); - - msft_do_open(hdev); - aosp_do_open(hdev); - - clear_bit(HCI_INIT, &hdev->flags); - - if (!ret) { - hci_dev_hold(hdev); - hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); - hci_adv_instances_set_rpa_expired(hdev, true); - set_bit(HCI_UP, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_UP); - hci_leds_update_powered(hdev, true); - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG) && - !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_MGMT) && - hdev->dev_type == HCI_PRIMARY) { - ret = __hci_req_hci_power_on(hdev); - mgmt_power_on(hdev, ret); - } - } else { - /* Init failed, cleanup */ - flush_work(&hdev->tx_work); - - /* Since hci_rx_work() is possible to awake new cmd_work - * it should be flushed first to avoid unexpected call of - * hci_cmd_work() - */ - flush_work(&hdev->rx_work); - flush_work(&hdev->cmd_work); - - skb_queue_purge(&hdev->cmd_q); - skb_queue_purge(&hdev->rx_q); - - if (hdev->flush) - hdev->flush(hdev); - - if (hdev->sent_cmd) { - kfree_skb(hdev->sent_cmd); - hdev->sent_cmd = NULL; - } - - clear_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_CLOSE); - - hdev->close(hdev); - hdev->flags &= BIT(HCI_RAW); - } + ret = hci_dev_open_sync(hdev); -done: hci_req_sync_unlock(hdev); return ret; } @@ -1583,155 +542,18 @@ done: return err; } -/* This function requires the caller holds hdev->lock */ -static void hci_pend_le_actions_clear(struct hci_dev *hdev) -{ - struct hci_conn_params *p; - - list_for_each_entry(p, &hdev->le_conn_params, list) { - if (p->conn) { - hci_conn_drop(p->conn); - hci_conn_put(p->conn); - p->conn = NULL; - } - list_del_init(&p->action); - } - - BT_DBG("All LE pending actions cleared"); -} - int hci_dev_do_close(struct hci_dev *hdev) { - bool auto_off; - int err = 0; + int err; BT_DBG("%s %p", hdev->name, hdev); - cancel_delayed_work(&hdev->power_off); - cancel_delayed_work(&hdev->ncmd_timer); - - hci_request_cancel_all(hdev); hci_req_sync_lock(hdev); - if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - test_bit(HCI_UP, &hdev->flags)) { - /* Execute vendor specific shutdown routine */ - if (hdev->shutdown) - err = hdev->shutdown(hdev); - } - - if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { 
- cancel_delayed_work_sync(&hdev->cmd_timer); - hci_req_sync_unlock(hdev); - return err; - } - - hci_leds_update_powered(hdev, false); - - /* Flush RX and TX works */ - flush_work(&hdev->tx_work); - flush_work(&hdev->rx_work); - - if (hdev->discov_timeout > 0) { - hdev->discov_timeout = 0; - hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); - hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); - } - - if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) - cancel_delayed_work(&hdev->service_cache); - - if (hci_dev_test_flag(hdev, HCI_MGMT)) { - struct adv_info *adv_instance; - - cancel_delayed_work_sync(&hdev->rpa_expired); - - list_for_each_entry(adv_instance, &hdev->adv_instances, list) - cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); - } - - /* Avoid potential lockdep warnings from the *_flush() calls by - * ensuring the workqueue is empty up front. - */ - drain_workqueue(hdev->workqueue); - - hci_dev_lock(hdev); - - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - - auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); - - if (!auto_off && hdev->dev_type == HCI_PRIMARY && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_MGMT)) - __mgmt_power_off(hdev); - - hci_inquiry_cache_flush(hdev); - hci_pend_le_actions_clear(hdev); - hci_conn_hash_flush(hdev); - hci_dev_unlock(hdev); - - smp_unregister(hdev); - - hci_sock_dev_event(hdev, HCI_DEV_DOWN); - - aosp_do_close(hdev); - msft_do_close(hdev); - - if (hdev->flush) - hdev->flush(hdev); - - /* Reset device */ - skb_queue_purge(&hdev->cmd_q); - atomic_set(&hdev->cmd_cnt, 1); - if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && - !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { - set_bit(HCI_INIT, &hdev->flags); - __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL); - clear_bit(HCI_INIT, &hdev->flags); - } - - /* flush cmd work */ - flush_work(&hdev->cmd_work); - - /* Drop queues */ - skb_queue_purge(&hdev->rx_q); - skb_queue_purge(&hdev->cmd_q); - skb_queue_purge(&hdev->raw_q); - - /* Drop last sent command */ - if (hdev->sent_cmd) { - cancel_delayed_work_sync(&hdev->cmd_timer); - kfree_skb(hdev->sent_cmd); - hdev->sent_cmd = NULL; - } - - clear_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_CLOSE); - - if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks)) - wake_up(&hdev->suspend_wait_q); - - /* After this point our queues are empty - * and no tasks are scheduled. 
*/ - hdev->close(hdev); - - /* Clear flags */ - hdev->flags &= BIT(HCI_RAW); - hci_dev_clear_volatile_flags(hdev); - - /* Controller radio is available but is currently powered down */ - hdev->amp_status = AMP_STATUS_POWERED_DOWN; - - memset(hdev->eir, 0, sizeof(hdev->eir)); - memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); - bacpy(&hdev->random_addr, BDADDR_ANY); - hci_codec_list_clear(&hdev->local_codecs); + err = hci_dev_close_sync(hdev); hci_req_sync_unlock(hdev); - hci_dev_put(hdev); return err; } @@ -1787,7 +609,7 @@ static int hci_dev_do_reset(struct hci_dev *hdev) atomic_set(&hdev->cmd_cnt, 1); hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; - ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL); + ret = hci_reset_sync(hdev); hci_req_sync_unlock(hdev); return ret; @@ -1850,7 +672,7 @@ done: return ret; } -static void hci_update_scan_state(struct hci_dev *hdev, u8 scan) +static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan) { bool conn_changed, discov_changed; @@ -1951,7 +773,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) * get correctly modified as this was a non-mgmt change. */ if (!err) - hci_update_scan_state(hdev, dr.dev_opt); + hci_update_passive_scan_state(hdev, dr.dev_opt); break; case HCISETLINKPOL: @@ -2133,9 +955,7 @@ static void hci_power_on(struct work_struct *work) hci_dev_test_flag(hdev, HCI_MGMT) && hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { cancel_delayed_work(&hdev->power_off); - hci_req_sync_lock(hdev); - err = __hci_req_hci_power_on(hdev); - hci_req_sync_unlock(hdev); + err = hci_powered_update_sync(hdev); mgmt_power_on(hdev, err); return; } @@ -3096,7 +1916,7 @@ bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err); /* Message was not forwarded to controller - not an error */ return false; @@ -3160,7 +1980,7 @@ bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err) pending = hci_remove_adv_monitor(hdev, monitor, handle, err); if (!*err && !pending) - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending", hdev->name, handle, *err, pending ? "" : "not "); @@ -3192,7 +2012,7 @@ bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err) } if (update) - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending", hdev->name, *err, pending ? 
"" : "not "); @@ -3486,7 +2306,7 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) hci_conn_params_free(params); - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); BT_DBG("addr %pMR (type %u)", addr, addr_type); } @@ -3554,61 +2374,6 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, } } -static void hci_suspend_clear_tasks(struct hci_dev *hdev) -{ - int i; - - for (i = 0; i < __SUSPEND_NUM_TASKS; i++) - clear_bit(i, hdev->suspend_tasks); - - wake_up(&hdev->suspend_wait_q); -} - -static int hci_suspend_wait_event(struct hci_dev *hdev) -{ -#define WAKE_COND \ - (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \ - __SUSPEND_NUM_TASKS) - - int i; - int ret = wait_event_timeout(hdev->suspend_wait_q, - WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT); - - if (ret == 0) { - bt_dev_err(hdev, "Timed out waiting for suspend events"); - for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) { - if (test_bit(i, hdev->suspend_tasks)) - bt_dev_err(hdev, "Suspend timeout bit: %d", i); - clear_bit(i, hdev->suspend_tasks); - } - - ret = -ETIMEDOUT; - } else { - ret = 0; - } - - return ret; -} - -static void hci_prepare_suspend(struct work_struct *work) -{ - struct hci_dev *hdev = - container_of(work, struct hci_dev, suspend_prepare); - - hci_dev_lock(hdev); - hci_req_prepare_suspend(hdev, hdev->suspend_state_next); - hci_dev_unlock(hdev); -} - -static int hci_change_suspend_state(struct hci_dev *hdev, - enum suspended_state next) -{ - hdev->suspend_state_next = next; - set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); - queue_work(hdev->req_workqueue, &hdev->suspend_prepare); - return hci_suspend_wait_event(hdev); -} - static void hci_clear_wake_reason(struct hci_dev *hdev) { hci_dev_lock(hdev); @@ -3745,7 +2510,8 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) INIT_WORK(&hdev->tx_work, hci_tx_work); INIT_WORK(&hdev->power_on, hci_power_on); INIT_WORK(&hdev->error_reset, hci_error_reset); - INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend); + + hci_cmd_sync_init(hdev); INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); @@ -3754,7 +2520,6 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) skb_queue_head_init(&hdev->raw_q); init_waitqueue_head(&hdev->req_wait_q); - init_waitqueue_head(&hdev->suspend_wait_q); INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); @@ -3882,6 +2647,7 @@ int hci_register_dev(struct hci_dev *hdev) return id; err_wqueue: + debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: @@ -3904,11 +2670,10 @@ void hci_unregister_dev(struct hci_dev *hdev) cancel_work_sync(&hdev->power_on); - if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { - hci_suspend_clear_tasks(hdev); + hci_cmd_sync_clear(hdev); + + if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) unregister_pm_notifier(&hdev->suspend_notifier); - cancel_work_sync(&hdev->suspend_prepare); - } msft_unregister(hdev); @@ -3975,7 +2740,6 @@ EXPORT_SYMBOL(hci_release_dev); int hci_suspend_dev(struct hci_dev *hdev) { int ret; - u8 state = BT_RUNNING; bt_dev_dbg(hdev, ""); @@ -3984,40 +2748,17 @@ int hci_suspend_dev(struct hci_dev *hdev) hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; - /* If powering down, wait for completion. 
*/ - if (mgmt_powering_down(hdev)) { - set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks); - ret = hci_suspend_wait_event(hdev); - if (ret) - goto done; - } - - /* Suspend consists of two actions: - * - First, disconnect everything and make the controller not - * connectable (disabling scanning) - * - Second, program event filter/accept list and enable scan - */ - ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT); - if (ret) - goto clear; - - state = BT_SUSPEND_DISCONNECT; + /* If powering down don't attempt to suspend */ + if (mgmt_powering_down(hdev)) + return 0; - /* Only configure accept list if device may wakeup. */ - if (hdev->wakeup && hdev->wakeup(hdev)) { - ret = hci_change_suspend_state(hdev, BT_SUSPEND_CONFIGURE_WAKE); - if (!ret) - state = BT_SUSPEND_CONFIGURE_WAKE; - } + hci_req_sync_lock(hdev); + ret = hci_suspend_sync(hdev); + hci_req_sync_unlock(hdev); -clear: hci_clear_wake_reason(hdev); - mgmt_suspending(hdev, state); + mgmt_suspending(hdev, hdev->suspend_state); -done: - /* We always allow suspend even if suspend preparation failed and - * attempt to recover in resume. - */ hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); return ret; } @@ -4039,10 +2780,12 @@ int hci_resume_dev(struct hci_dev *hdev) if (mgmt_powering_down(hdev)) return 0; - ret = hci_change_suspend_state(hdev, BT_RUNNING); + hci_req_sync_lock(hdev); + ret = hci_resume_sync(hdev); + hci_req_sync_unlock(hdev); mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, - hdev->wake_addr_type); + hdev->wake_addr_type); hci_sock_dev_event(hdev, HCI_DEV_RESUME); return ret; @@ -4270,25 +3013,6 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; } -/* Send HCI command and wait for command complete event */ -struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param, u32 timeout) -{ - struct sk_buff *skb; - - if (!test_bit(HCI_UP, &hdev->flags)) - return ERR_PTR(-ENETDOWN); - - bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); - - hci_req_sync_lock(hdev); - skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); - hci_req_sync_unlock(hdev); - - return skb; -} -EXPORT_SYMBOL(hci_cmd_sync); - /* Send ACL data */ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) { diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 7d0db1ca1248..efc5458b1345 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -545,9 +545,7 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) hdev->features[1][0] &= ~LMP_HOST_SSP; } - if (hci_dev_test_flag(hdev, HCI_MGMT)) - mgmt_ssp_enable_complete(hdev, sent->mode, status); - else if (!status) { + if (!status) { if (sent->mode) hci_dev_set_flag(hdev, HCI_SSP_ENABLED); else @@ -1239,6 +1237,55 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, hci_dev_unlock(hdev); } +static void hci_cc_le_remove_adv_set(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *)skb->data); + u8 *instance; + int err; + + if (status) + return; + + instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET); + if (!instance) + return; + + hci_dev_lock(hdev); + + err = hci_remove_adv_instance(hdev, *instance); + if (!err) + mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev, + *instance); + + hci_dev_unlock(hdev); +} + +static void hci_cc_le_clear_adv_sets(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *)skb->data); + struct adv_info *adv, *n; + int 
err; + + if (status) + return; + + if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS)) + return; + + hci_dev_lock(hdev); + + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + u8 instance = adv->instance; + + err = hci_remove_adv_instance(hdev, instance); + if (!err) + mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), + hdev, instance); + } + + hci_dev_unlock(hdev); +} + static void hci_cc_le_read_transmit_power(struct hci_dev *hdev, struct sk_buff *skb) { @@ -1326,8 +1373,10 @@ static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, &conn->le_conn_timeout, conn->conn_timeout); } else { - if (adv) { - adv->enabled = false; + if (cp->num_of_sets) { + if (adv) + adv->enabled = false; + /* If just one instance was disabled check if there are * any other instance enabled before clearing HCI_LE_ADV */ @@ -1463,16 +1512,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we * interrupted scanning due to a connect request. Mark - * therefore discovery as stopped. If this was not - * because of a connect request advertising might have - * been disabled because of active scanning, so - * re-enable it again if necessary. + * therefore discovery as stopped. */ if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) && - hdev->discovery.state == DISCOVERY_FINDING) - hci_req_reenable_advertising(hdev); break; @@ -2371,9 +2414,14 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) { struct hci_cp_disconnect *cp; + struct hci_conn_params *params; struct hci_conn *conn; + bool mgmt_conn; - if (!status) + /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended + * otherwise cleanup the connection immediately. + */ + if (!status && !hdev->suspended) return; cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); @@ -2383,23 +2431,60 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); - if (conn) { + if (!conn) + goto unlock; + + if (status) { mgmt_disconnect_failed(hdev, &conn->dst, conn->type, conn->dst_type, status); if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { hdev->cur_adv_instance = conn->adv_instance; - hci_req_reenable_advertising(hdev); + hci_enable_advertising(hdev); } - /* If the disconnection failed for any reason, the upper layer - * does not retry to disconnect in current implementation. - * Hence, we need to do some basic cleanup here and re-enable - * advertising if necessary. 
- */ - hci_conn_del(conn); + goto done; + } + + mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); + + if (conn->type == ACL_LINK) { + if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + hci_remove_link_key(hdev, &conn->dst); + } + + params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); + if (params) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_LINK_LOSS: + if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) + break; + fallthrough; + + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + list_del_init(&params->action); + list_add(&params->action, &hdev->pend_le_conns); + break; + + default: + break; + } } + mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, + cp->reason, mgmt_conn); + + hci_disconn_cfm(conn, cp->reason); + +done: + /* If the disconnection failed for any reason, the upper layer + * does not retry to disconnect in current implementation. + * Hence, we need to do some basic cleanup here and re-enable + * advertising if necessary. + */ + hci_conn_del(conn); +unlock: hci_dev_unlock(hdev); } @@ -2977,7 +3062,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) case HCI_AUTO_CONN_ALWAYS: list_del_init(&params->action); list_add(&params->action, &hdev->pend_le_conns); - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); break; default: @@ -2987,14 +3072,6 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_disconn_cfm(conn, ev->reason); - /* The suspend notifier is waiting for all devices to disconnect so - * clear the bit from pending tasks and inform the wait queue. - */ - if (list_empty(&hdev->conn_hash.list) && - test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) { - wake_up(&hdev->suspend_wait_q); - } - /* Re-enable advertising if necessary, since it might * have been disabled by the connection.
From the * HCI_LE_Set_Advertise_Enable command description in @@ -3007,7 +3084,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) */ if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { hdev->cur_adv_instance = conn->adv_instance; - hci_req_reenable_advertising(hdev); + hci_enable_advertising(hdev); } hci_conn_del(conn); @@ -3723,6 +3800,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_set_adv_set_random_addr(hdev, skb); break; + case HCI_OP_LE_REMOVE_ADV_SET: + hci_cc_le_remove_adv_set(hdev, skb); + break; + + case HCI_OP_LE_CLEAR_ADV_SETS: + hci_cc_le_clear_adv_sets(hdev, skb); + break; + case HCI_OP_LE_READ_TRANSMIT_POWER: hci_cc_le_read_transmit_power(hdev, skb); break; @@ -4445,7 +4530,6 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, { struct hci_ev_sync_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; - unsigned int notify_evt; BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); @@ -4517,22 +4601,18 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, } bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); - - switch (ev->air_mode) { - case 0x02: - notify_evt = HCI_NOTIFY_ENABLE_SCO_CVSD; - break; - case 0x03: - notify_evt = HCI_NOTIFY_ENABLE_SCO_TRANSP; - break; - } - /* Notify only in case of SCO over HCI transport data path which * is zero and non-zero value shall be non-HCI transport data path */ - if (conn->codec.data_path == 0) { - if (hdev->notify) - hdev->notify(hdev, notify_evt); + if (conn->codec.data_path == 0 && hdev->notify) { + switch (ev->air_mode) { + case 0x02: + hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); + break; + case 0x03: + hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); + break; + } } hci_connect_cfm(conn, ev->status); @@ -5412,7 +5492,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, } unlock: - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); hci_dev_unlock(hdev); } @@ -5441,23 +5521,30 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, le16_to_cpu(ev->interval), le16_to_cpu(ev->latency), le16_to_cpu(ev->supervision_timeout)); - - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) - hci_req_disable_address_resolution(hdev); } static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data; struct hci_conn *conn; - struct adv_info *adv; + struct adv_info *adv, *n; BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); adv = hci_find_adv_instance(hdev, ev->handle); + /* The Bluetooth Core 5.3 specification clearly states that this event + * shall not be sent when the Host disables the advertising set. So in + * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. + * + * When the Host disables an advertising set, all cleanup is done via + * its command callback and not needed to be duplicated here. 
+ */ + if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { + bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); + return; + } + if (ev->status) { if (!adv) return; @@ -5466,6 +5553,13 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_remove_adv_instance(hdev, ev->handle); mgmt_advertising_removed(NULL, hdev, ev->handle); + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + if (adv->enabled) + return; + } + + /* We are no longer advertising, clear HCI_LE_ADV */ + hci_dev_clear_flag(hdev, HCI_LE_ADV); return; } @@ -5529,8 +5623,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) return NULL; - /* Ignore if the device is blocked */ - if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type)) + /* Ignore if the device is blocked or hdev is suspended */ + if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || + hdev->suspended) return NULL; /* Most controller will fail if we try to create new connections @@ -5825,7 +5920,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) struct hci_ev_le_advertising_info *ev = ptr; s8 rssi; - if (ev->length <= HCI_MAX_AD_LENGTH) { + if (ev->length <= HCI_MAX_AD_LENGTH && + ev->data + ev->length <= skb_tail_pointer(skb)) { rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, @@ -5835,6 +5931,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) } ptr += sizeof(*ev) + ev->length + 1; + + if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) { + bt_dev_err(hdev, "Malicious advertising data. Stopping processing"); + break; + } } hci_dev_unlock(hdev); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 92611bfc0b9e..8b3205e4b23e 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -32,10 +32,6 @@ #include "msft.h" #include "eir.h" -#define HCI_REQ_DONE 0 -#define HCI_REQ_PEND 1 -#define HCI_REQ_CANCELED 2 - void hci_req_init(struct hci_request *req, struct hci_dev *hdev) { skb_queue_head_init(&req->cmd_q); @@ -101,8 +97,8 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) return req_run(req, NULL, complete); } -static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, - struct sk_buff *skb) +void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, + struct sk_buff *skb) { bt_dev_dbg(hdev, "result 0x%2.2x", result); @@ -126,70 +122,6 @@ void hci_req_sync_cancel(struct hci_dev *hdev, int err) } } -struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param, u8 event, u32 timeout) -{ - struct hci_request req; - struct sk_buff *skb; - int err = 0; - - bt_dev_dbg(hdev, ""); - - hci_req_init(&req, hdev); - - hci_req_add_ev(&req, opcode, plen, param, event); - - hdev->req_status = HCI_REQ_PEND; - - err = hci_req_run_skb(&req, hci_req_sync_complete); - if (err < 0) - return ERR_PTR(err); - - err = wait_event_interruptible_timeout(hdev->req_wait_q, - hdev->req_status != HCI_REQ_PEND, timeout); - - if (err == -ERESTARTSYS) - return ERR_PTR(-EINTR); - - switch (hdev->req_status) { - case HCI_REQ_DONE: - err = -bt_to_errno(hdev->req_result); - break; - - case HCI_REQ_CANCELED: - err = -hdev->req_result; - break; - - default: - err = -ETIMEDOUT; - break; - } - - hdev->req_status = hdev->req_result = 0; - skb = hdev->req_skb; - 
hdev->req_skb = NULL; - - bt_dev_dbg(hdev, "end: err %d", err); - - if (err < 0) { - kfree_skb(skb); - return ERR_PTR(err); - } - - if (!skb) - return ERR_PTR(-ENODATA); - - return skb; -} -EXPORT_SYMBOL(__hci_cmd_sync_ev); - -struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param, u32 timeout) -{ - return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout); -} -EXPORT_SYMBOL(__hci_cmd_sync); - /* Execute request and wait for completion. */ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, unsigned long opt), @@ -436,82 +368,6 @@ static bool __hci_update_interleaved_scan(struct hci_dev *hdev) return false; } -/* This function controls the background scanning based on hdev->pend_le_conns - * list. If there are pending LE connection we start the background scanning, - * otherwise we stop it. - * - * This function requires the caller holds hdev->lock. - */ -static void __hci_update_background_scan(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - - if (!test_bit(HCI_UP, &hdev->flags) || - test_bit(HCI_INIT, &hdev->flags) || - hci_dev_test_flag(hdev, HCI_SETUP) || - hci_dev_test_flag(hdev, HCI_CONFIG) || - hci_dev_test_flag(hdev, HCI_AUTO_OFF) || - hci_dev_test_flag(hdev, HCI_UNREGISTER)) - return; - - /* No point in doing scanning if LE support hasn't been enabled */ - if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) - return; - - /* If discovery is active don't interfere with it */ - if (hdev->discovery.state != DISCOVERY_STOPPED) - return; - - /* Reset RSSI and UUID filters when starting background scanning - * since these filters are meant for service discovery only. - * - * The Start Discovery and Start Service Discovery operations - * ensure to set proper values for RSSI threshold and UUID - * filter list. So it is safe to just reset them here. - */ - hci_discovery_filter_clear(hdev); - - bt_dev_dbg(hdev, "ADV monitoring is %s", - hci_is_adv_monitoring(hdev) ? "on" : "off"); - - if (list_empty(&hdev->pend_le_conns) && - list_empty(&hdev->pend_le_reports) && - !hci_is_adv_monitoring(hdev)) { - /* If there is no pending LE connections or devices - * to be scanned for or no ADV monitors, we should stop the - * background scanning. - */ - - /* If controller is not scanning we are done. */ - if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) - return; - - hci_req_add_le_scan_disable(req, false); - - bt_dev_dbg(hdev, "stopping background scanning"); - } else { - /* If there is at least one pending LE connection, we should - * keep the background scan running. - */ - - /* If controller is connecting, we should not start scanning - * since some controllers are not able to scan and connect at - * the same time. - */ - if (hci_lookup_le_connect(hdev)) - return; - - /* If controller is currently scanning, we stop it to ensure we - * don't miss any advertising (due to duplicates filter). 
- */ - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) - hci_req_add_le_scan_disable(req, false); - - hci_req_add_le_passive_scan(req); - bt_dev_dbg(hdev, "starting background scanning"); - } -} - void __hci_req_update_name(struct hci_request *req) { struct hci_dev *hdev = req->hdev; @@ -560,9 +416,6 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) return; } - if (hdev->suspended) - set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); - if (use_ext_scan(hdev)) { struct hci_cp_le_set_ext_scan_enable cp; @@ -579,9 +432,7 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) } /* Disable address resolution */ - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { + if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { __u8 enable = 0x00; hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); @@ -600,8 +451,7 @@ static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp); - if (use_ll_privacy(req->hdev) && - hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) { + if (use_ll_privacy(req->hdev)) { struct smp_irk *irk; irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); @@ -654,8 +504,7 @@ static int add_to_accept_list(struct hci_request *req, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp); - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) { + if (use_ll_privacy(hdev)) { struct smp_irk *irk; irk = hci_find_irk_by_addr(hdev, ¶ms->addr, @@ -694,8 +543,7 @@ static u8 update_accept_list(struct hci_request *req) */ bool allow_rpa = hdev->suspended; - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + if (use_ll_privacy(hdev)) allow_rpa = true; /* Go through the current accept list programmed into the @@ -784,9 +632,7 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, return; } - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - addr_resolv) { + if (use_ll_privacy(hdev) && addr_resolv) { u8 enable = 0x01; hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); @@ -943,8 +789,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req) if (hdev->suspended) { window = hdev->le_scan_window_suspend; interval = hdev->le_scan_int_suspend; - - set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); } else if (hci_is_le_conn_scanning(hdev)) { window = hdev->le_scan_window_connect; interval = hdev->le_scan_int_connect; @@ -977,59 +821,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req) addr_resolv); } -static void hci_req_clear_event_filter(struct hci_request *req) -{ - struct hci_cp_set_event_filter f; - - if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED)) - return; - - if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) { - memset(&f, 0, sizeof(f)); - f.flt_type = HCI_FLT_CLEAR_ALL; - hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f); - } -} - -static void hci_req_set_event_filter(struct hci_request *req) -{ - struct bdaddr_list_with_flags *b; - struct hci_cp_set_event_filter f; - struct hci_dev *hdev = req->hdev; - u8 scan = SCAN_DISABLED; - bool scanning = test_bit(HCI_PSCAN, &hdev->flags); - - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) - return; - - /* Always clear event filter when starting */ - hci_req_clear_event_filter(req); - - 
list_for_each_entry(b, &hdev->accept_list, list) { - if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, - b->current_flags)) - continue; - - memset(&f, 0, sizeof(f)); - bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr); - f.flt_type = HCI_FLT_CONN_SETUP; - f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR; - f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON; - - bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); - hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f); - scan = SCAN_PAGE; - } - - if (scan && !scanning) { - set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); - hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); - } else if (!scan && scanning) { - set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); - hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); - } -} - static void cancel_adv_timeout(struct hci_dev *hdev) { if (hdev->adv_instance_timeout) { @@ -1088,185 +879,6 @@ int hci_req_resume_adv_instances(struct hci_dev *hdev) return hci_req_run(&req, NULL); } -static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode) -{ - bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode, - status); - if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) || - test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) { - clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); - clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); - wake_up(&hdev->suspend_wait_q); - } - - if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) { - clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); - wake_up(&hdev->suspend_wait_q); - } -} - -static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req, - bool suspending) -{ - struct hci_dev *hdev = req->hdev; - - switch (hci_get_adv_monitor_offload_ext(hdev)) { - case HCI_ADV_MONITOR_EXT_MSFT: - if (suspending) - msft_suspend(hdev); - else - msft_resume(hdev); - break; - default: - return; - } - - /* No need to block when enabling since it's on resume path */ - if (hdev->suspended && suspending) - set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); -} - -/* Call with hci_dev_lock */ -void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) -{ - int old_state; - struct hci_conn *conn; - struct hci_request req; - u8 page_scan; - int disconnect_counter; - - if (next == hdev->suspend_state) { - bt_dev_dbg(hdev, "Same state before and after: %d", next); - goto done; - } - - hdev->suspend_state = next; - hci_req_init(&req, hdev); - - if (next == BT_SUSPEND_DISCONNECT) { - /* Mark device as suspended */ - hdev->suspended = true; - - /* Pause discovery if not already stopped */ - old_state = hdev->discovery.state; - if (old_state != DISCOVERY_STOPPED) { - set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks); - hci_discovery_set_state(hdev, DISCOVERY_STOPPING); - queue_work(hdev->req_workqueue, &hdev->discov_update); - } - - hdev->discovery_paused = true; - hdev->discovery_old_state = old_state; - - /* Stop directed advertising */ - old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); - if (old_state) { - set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks); - cancel_delayed_work(&hdev->discov_off); - queue_delayed_work(hdev->req_workqueue, - &hdev->discov_off, 0); - } - - /* Pause other advertisements */ - if (hdev->adv_instance_cnt) - __hci_req_pause_adv_instances(&req); - - hdev->advertising_paused = true; - hdev->advertising_old_state = old_state; - - /* Disable page scan if enabled */ - if (test_bit(HCI_PSCAN, &hdev->flags)) { - page_scan = SCAN_DISABLED; - hci_req_add(&req, 
HCI_OP_WRITE_SCAN_ENABLE, 1, - &page_scan); - set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); - } - - /* Disable LE passive scan if enabled */ - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { - cancel_interleave_scan(hdev); - hci_req_add_le_scan_disable(&req, false); - } - - /* Disable advertisement filters */ - hci_req_prepare_adv_monitor_suspend(&req, true); - - /* Prevent disconnects from causing scanning to be re-enabled */ - hdev->scanning_paused = true; - - /* Run commands before disconnecting */ - hci_req_run(&req, suspend_req_complete); - - disconnect_counter = 0; - /* Soft disconnect everything (power off) */ - list_for_each_entry(conn, &hdev->conn_hash.list, list) { - hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF); - disconnect_counter++; - } - - if (disconnect_counter > 0) { - bt_dev_dbg(hdev, - "Had %d disconnects. Will wait on them", - disconnect_counter); - set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks); - } - } else if (next == BT_SUSPEND_CONFIGURE_WAKE) { - /* Unpause to take care of updating scanning params */ - hdev->scanning_paused = false; - /* Enable event filter for paired devices */ - hci_req_set_event_filter(&req); - /* Enable passive scan at lower duty cycle */ - __hci_update_background_scan(&req); - /* Pause scan changes again. */ - hdev->scanning_paused = true; - hci_req_run(&req, suspend_req_complete); - } else { - hdev->suspended = false; - hdev->scanning_paused = false; - - /* Clear any event filters and restore scan state */ - hci_req_clear_event_filter(&req); - __hci_req_update_scan(&req); - - /* Reset passive/background scanning to normal */ - __hci_update_background_scan(&req); - /* Enable all of the advertisement filters */ - hci_req_prepare_adv_monitor_suspend(&req, false); - - /* Unpause directed advertising */ - hdev->advertising_paused = false; - if (hdev->advertising_old_state) { - set_bit(SUSPEND_UNPAUSE_ADVERTISING, - hdev->suspend_tasks); - hci_dev_set_flag(hdev, HCI_ADVERTISING); - queue_work(hdev->req_workqueue, - &hdev->discoverable_update); - hdev->advertising_old_state = 0; - } - - /* Resume other advertisements */ - if (hdev->adv_instance_cnt) - __hci_req_resume_adv_instances(&req); - - /* Unpause discovery */ - hdev->discovery_paused = false; - if (hdev->discovery_old_state != DISCOVERY_STOPPED && - hdev->discovery_old_state != DISCOVERY_STOPPING) { - set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks); - hci_discovery_set_state(hdev, DISCOVERY_STARTING); - queue_work(hdev->req_workqueue, &hdev->discov_update); - } - - hci_req_run(&req, suspend_req_complete); - } - - hdev->suspend_state = next; - -done: - clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); - wake_up(&hdev->suspend_wait_q); -} - static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) { return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance); @@ -1548,8 +1160,7 @@ void hci_req_disable_address_resolution(struct hci_dev *hdev) struct hci_request req; __u8 enable = 0x00; - if (!use_ll_privacy(hdev) && - !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) + if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) return; hci_req_init(&req, hdev); @@ -1692,8 +1303,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, /* If Controller supports LL Privacy use own address type is * 0x03 */ - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; @@ -1871,7 +1481,8 @@ int 
__hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); - if (own_addr_type == ADDR_LE_DEV_RANDOM && + if ((own_addr_type == ADDR_LE_DEV_RANDOM || + own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) && bacmp(&random_addr, BDADDR_ANY)) { struct hci_cp_le_set_adv_set_rand_addr cp; @@ -2160,8 +1771,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy, /* If Controller supports LL Privacy use own address type is * 0x03 */ - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; @@ -2301,47 +1911,6 @@ static void scan_update_work(struct work_struct *work) hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL); } -static int connectable_update(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - hci_dev_lock(hdev); - - __hci_req_update_scan(req); - - /* If BR/EDR is not enabled and we disable advertising as a - * by-product of disabling connectable, we need to update the - * advertising flags. - */ - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) - __hci_req_update_adv_data(req, hdev->cur_adv_instance); - - /* Update the advertising parameters if necessary */ - if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || - !list_empty(&hdev->adv_instances)) { - if (ext_adv_capable(hdev)) - __hci_req_start_ext_adv(req, hdev->cur_adv_instance); - else - __hci_req_enable_advertising(req); - } - - __hci_update_background_scan(req); - - hci_dev_unlock(hdev); - - return 0; -} - -static void connectable_update_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - connectable_update); - u8 status; - - hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status); - mgmt_set_connectable_complete(hdev, status); -} - static u8 get_service_classes(struct hci_dev *hdev) { struct bt_uuid *uuid; @@ -2445,16 +2014,6 @@ static int discoverable_update(struct hci_request *req, unsigned long opt) return 0; } -static void discoverable_update_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - discoverable_update); - u8 status; - - hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status); - mgmt_set_discoverable_complete(hdev, status); -} - void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, u8 reason) { @@ -2548,35 +2107,6 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason) return 0; } -static int update_bg_scan(struct hci_request *req, unsigned long opt) -{ - hci_dev_lock(req->hdev); - __hci_update_background_scan(req); - hci_dev_unlock(req->hdev); - return 0; -} - -static void bg_scan_update(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - bg_scan_update); - struct hci_conn *conn; - u8 status; - int err; - - err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status); - if (!err) - return; - - hci_dev_lock(hdev); - - conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); - if (conn) - hci_le_conn_failed(conn, status); - - hci_dev_unlock(hdev); -} - static int le_scan_disable(struct hci_request *req, unsigned long opt) { hci_req_add_le_scan_disable(req, false); @@ -3163,10 +2693,7 @@ int __hci_req_hci_power_on(struct hci_dev *hdev) void hci_request_setup(struct hci_dev *hdev) { INIT_WORK(&hdev->discov_update, discov_update); - INIT_WORK(&hdev->bg_scan_update, 
bg_scan_update); INIT_WORK(&hdev->scan_update, scan_update_work); - INIT_WORK(&hdev->connectable_update, connectable_update_work); - INIT_WORK(&hdev->discoverable_update, discoverable_update_work); INIT_DELAYED_WORK(&hdev->discov_off, discov_off); INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); @@ -3179,10 +2706,7 @@ void hci_request_cancel_all(struct hci_dev *hdev) hci_req_sync_cancel(hdev, ENODEV); cancel_work_sync(&hdev->discov_update); - cancel_work_sync(&hdev->bg_scan_update); cancel_work_sync(&hdev->scan_update); - cancel_work_sync(&hdev->connectable_update); - cancel_work_sync(&hdev->discoverable_update); cancel_delayed_work_sync(&hdev->discov_off); cancel_delayed_work_sync(&hdev->le_scan_disable); cancel_delayed_work_sync(&hdev->le_scan_restart); diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index f31420f58525..5f8e8846ec74 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -22,9 +22,17 @@ #include <asm/unaligned.h> +#define HCI_REQ_DONE 0 +#define HCI_REQ_PEND 1 +#define HCI_REQ_CANCELED 2 + #define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock) #define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock) +#define HCI_REQ_DONE 0 +#define HCI_REQ_PEND 1 +#define HCI_REQ_CANCELED 2 + struct hci_request { struct hci_dev *hdev; struct sk_buff_head cmd_q; @@ -40,6 +48,8 @@ void hci_req_purge(struct hci_request *req); bool hci_req_status_pend(struct hci_dev *hdev); int hci_req_run(struct hci_request *req, hci_req_complete_t complete); int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete); +void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, + struct sk_buff *skb); void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, const void *param); void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, @@ -117,10 +127,5 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason); void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, u8 reason); -static inline void hci_update_background_scan(struct hci_dev *hdev) -{ - queue_work(hdev->req_workqueue, &hdev->bg_scan_update); -} - void hci_request_setup(struct hci_dev *hdev); void hci_request_cancel_all(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index d0dad1fafe07..446573a12571 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -889,10 +889,6 @@ static int hci_sock_release(struct socket *sock) } sock_orphan(sk); - - skb_queue_purge(&sk->sk_receive_queue); - skb_queue_purge(&sk->sk_write_queue); - release_sock(sk); sock_put(sk); return 0; @@ -2058,6 +2054,12 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, return err; } +static void hci_sock_destruct(struct sock *sk) +{ + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + static const struct proto_ops hci_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, @@ -2111,6 +2113,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol, sock->state = SS_UNCONNECTED; sk->sk_state = BT_OPEN; + sk->sk_destruct = hci_sock_destruct; bt_sock_link(&hci_sk_list, sk); return 0; diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c new file mode 100644 index 000000000000..ad86caf41f91 --- /dev/null +++ b/net/bluetooth/hci_sync.c @@ -0,0 +1,4922 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BlueZ - Bluetooth protocol stack for Linux 
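The new hci_sync.c below is built around a deferred-execution API: callers hand a callback to hci_cmd_sync_queue() and it later runs from hci_cmd_sync_work() with hdev->req_lock held. A minimal caller sketch, assuming only the helpers added further down in this file (hci_cmd_sync_queue() and hci_update_class_sync()); it is an illustration, not part of the patch:

static int update_class_sync(struct hci_dev *hdev, void *data)
{
	/* Runs from hci_cmd_sync_work() with hdev->req_lock held */
	return hci_update_class_sync(hdev);
}

static int queue_class_update(struct hci_dev *hdev)
{
	/* No private data and no destroy callback needed for this caller */
	return hci_cmd_sync_queue(hdev, update_class_sync, NULL, NULL);
}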
+ * + * Copyright (C) 2021 Intel Corporation + */ + +#include <linux/property.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/mgmt.h> + +#include "hci_request.h" +#include "hci_debugfs.h" +#include "smp.h" +#include "eir.h" +#include "msft.h" +#include "aosp.h" +#include "leds.h" + +static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, + struct sk_buff *skb) +{ + bt_dev_dbg(hdev, "result 0x%2.2x", result); + + if (hdev->req_status != HCI_REQ_PEND) + return; + + hdev->req_result = result; + hdev->req_status = HCI_REQ_DONE; + + if (skb) { + struct sock *sk = hci_skb_sk(skb); + + /* Drop sk reference if set */ + if (sk) + sock_put(sk); + + hdev->req_skb = skb_get(skb); + } + + wake_up_interruptible(&hdev->req_wait_q); +} + +static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, + u32 plen, const void *param, + struct sock *sk) +{ + int len = HCI_COMMAND_HDR_SIZE + plen; + struct hci_command_hdr *hdr; + struct sk_buff *skb; + + skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!skb) + return NULL; + + hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); + hdr->opcode = cpu_to_le16(opcode); + hdr->plen = plen; + + if (plen) + skb_put_data(skb, param, plen); + + bt_dev_dbg(hdev, "skb len %d", skb->len); + + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + hci_skb_opcode(skb) = opcode; + + /* Grab a reference if command needs to be associated with a sock (e.g. + * likely mgmt socket that initiated the command). + */ + if (sk) { + hci_skb_sk(skb) = sk; + sock_hold(sk); + } + + return skb; +} + +static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, + const void *param, u8 event, struct sock *sk) +{ + struct hci_dev *hdev = req->hdev; + struct sk_buff *skb; + + bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); + + /* If an error occurred during request building, there is no point in + * queueing the HCI command. We can simply return. + */ + if (req->err) + return; + + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk); + if (!skb) { + bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", + opcode); + req->err = -ENOMEM; + return; + } + + if (skb_queue_empty(&req->cmd_q)) + bt_cb(skb)->hci.req_flags |= HCI_REQ_START; + + bt_cb(skb)->hci.req_event = event; + + skb_queue_tail(&req->cmd_q, skb); +} + +static int hci_cmd_sync_run(struct hci_request *req) +{ + struct hci_dev *hdev = req->hdev; + struct sk_buff *skb; + unsigned long flags; + + bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); + + /* If an error occurred during request building, remove all HCI + * commands queued on the HCI request queue. + */ + if (req->err) { + skb_queue_purge(&req->cmd_q); + return req->err; + } + + /* Do not allow empty requests */ + if (skb_queue_empty(&req->cmd_q)) + return -ENODATA; + + skb = skb_peek_tail(&req->cmd_q); + bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete; + bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; + + spin_lock_irqsave(&hdev->cmd_q.lock, flags); + skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); + spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); + + queue_work(hdev->workqueue, &hdev->cmd_work); + + return 0; +} + +/* This function requires the caller holds hdev->req_lock. 
*/ +struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u8 event, u32 timeout, + struct sock *sk) +{ + struct hci_request req; + struct sk_buff *skb; + int err = 0; + + bt_dev_dbg(hdev, "Opcode 0x%4x", opcode); + + hci_req_init(&req, hdev); + + hci_cmd_sync_add(&req, opcode, plen, param, event, sk); + + hdev->req_status = HCI_REQ_PEND; + + err = hci_cmd_sync_run(&req); + if (err < 0) + return ERR_PTR(err); + + err = wait_event_interruptible_timeout(hdev->req_wait_q, + hdev->req_status != HCI_REQ_PEND, + timeout); + + if (err == -ERESTARTSYS) + return ERR_PTR(-EINTR); + + switch (hdev->req_status) { + case HCI_REQ_DONE: + err = -bt_to_errno(hdev->req_result); + break; + + case HCI_REQ_CANCELED: + err = -hdev->req_result; + break; + + default: + err = -ETIMEDOUT; + break; + } + + hdev->req_status = 0; + hdev->req_result = 0; + skb = hdev->req_skb; + hdev->req_skb = NULL; + + bt_dev_dbg(hdev, "end: err %d", err); + + if (err < 0) { + kfree_skb(skb); + return ERR_PTR(err); + } + + return skb; +} +EXPORT_SYMBOL(__hci_cmd_sync_sk); + +/* This function requires the caller holds hdev->req_lock. */ +struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL); +} +EXPORT_SYMBOL(__hci_cmd_sync); + +/* Send HCI command and wait for command complete event */ +struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + struct sk_buff *skb; + + if (!test_bit(HCI_UP, &hdev->flags)) + return ERR_PTR(-ENETDOWN); + + bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); + + hci_req_sync_lock(hdev); + skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); + hci_req_sync_unlock(hdev); + + return skb; +} +EXPORT_SYMBOL(hci_cmd_sync); + +/* This function requires the caller holds hdev->req_lock. */ +struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u8 event, u32 timeout) +{ + return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, + NULL); +} +EXPORT_SYMBOL(__hci_cmd_sync_ev); + +/* This function requires the caller holds hdev->req_lock. */ +int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u8 event, u32 timeout, + struct sock *sk) +{ + struct sk_buff *skb; + u8 status; + + skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode, + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + /* If command return a status event skb will be set to NULL as there are + * no parameters, in case of failure IS_ERR(skb) would have be set to + * the actual error would be found with PTR_ERR(skb). 
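A hedged reading of the return convention described in the comment above: __hci_cmd_sync_status*() returns a negative errno when the request itself fails, the controller's HCI status byte when one is returned, and 0 otherwise. A sketch (not part of the patch) of how a caller might collapse that into a single errno, reusing bt_to_errno() as the patch itself does:

static int send_simple_cmd(struct hci_dev *hdev, u16 opcode,
			   u32 plen, const void *param)
{
	int ret = __hci_cmd_sync_status(hdev, opcode, plen, param,
					HCI_CMD_TIMEOUT);

	if (ret < 0)		/* request failed (-ETIMEDOUT, -EINTR, ...) */
		return ret;
	if (ret)		/* non-zero HCI status from the controller */
		return -bt_to_errno(ret);
	return 0;		/* Command Complete with status 0x00 */
}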
+ */ + if (!skb) + return 0; + + status = skb->data[0]; + + kfree_skb(skb); + + return status; +} +EXPORT_SYMBOL(__hci_cmd_sync_status_sk); + +int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout, + NULL); +} +EXPORT_SYMBOL(__hci_cmd_sync_status); + +static void hci_cmd_sync_work(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work); + struct hci_cmd_sync_work_entry *entry; + hci_cmd_sync_work_func_t func; + hci_cmd_sync_work_destroy_t destroy; + void *data; + + bt_dev_dbg(hdev, ""); + + mutex_lock(&hdev->cmd_sync_work_lock); + entry = list_first_entry(&hdev->cmd_sync_work_list, + struct hci_cmd_sync_work_entry, list); + if (entry) { + list_del(&entry->list); + func = entry->func; + data = entry->data; + destroy = entry->destroy; + kfree(entry); + } else { + func = NULL; + data = NULL; + destroy = NULL; + } + mutex_unlock(&hdev->cmd_sync_work_lock); + + if (func) { + int err; + + hci_req_sync_lock(hdev); + + err = func(hdev, data); + + if (destroy) + destroy(hdev, data, err); + + hci_req_sync_unlock(hdev); + } +} + +void hci_cmd_sync_init(struct hci_dev *hdev) +{ + INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); + INIT_LIST_HEAD(&hdev->cmd_sync_work_list); + mutex_init(&hdev->cmd_sync_work_lock); +} + +void hci_cmd_sync_clear(struct hci_dev *hdev) +{ + struct hci_cmd_sync_work_entry *entry, *tmp; + + cancel_work_sync(&hdev->cmd_sync_work); + + list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { + if (entry->destroy) + entry->destroy(hdev, entry->data, -ECANCELED); + + list_del(&entry->list); + kfree(entry); + } +} + +int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->func = func; + entry->data = data; + entry->destroy = destroy; + + mutex_lock(&hdev->cmd_sync_work_lock); + list_add_tail(&entry->list, &hdev->cmd_sync_work_list); + mutex_unlock(&hdev->cmd_sync_work_lock); + + queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); + + return 0; +} +EXPORT_SYMBOL(hci_cmd_sync_queue); + +int hci_update_eir_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_eir cp; + + bt_dev_dbg(hdev, ""); + + if (!hdev_is_powered(hdev)) + return 0; + + if (!lmp_ext_inq_capable(hdev)) + return 0; + + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return 0; + + if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + eir_create(hdev, cp.data); + + if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) + return 0; + + memcpy(hdev->eir, cp.data, sizeof(cp.data)); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +static u8 get_service_classes(struct hci_dev *hdev) +{ + struct bt_uuid *uuid; + u8 val = 0; + + list_for_each_entry(uuid, &hdev->uuids, list) + val |= uuid->svc_hint; + + return val; +} + +int hci_update_class_sync(struct hci_dev *hdev) +{ + u8 cod[3]; + + bt_dev_dbg(hdev, ""); + + if (!hdev_is_powered(hdev)) + return 0; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) + return 0; + + cod[0] = hdev->minor_class; + cod[1] = hdev->major_class; + cod[2] = get_service_classes(hdev); + + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) + cod[1] 
|= 0x20; + + if (memcmp(cod, hdev->dev_class, 3) == 0) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV, + sizeof(cod), cod, HCI_CMD_TIMEOUT); +} + +static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) +{ + /* If there is no connection we are OK to advertise. */ + if (hci_conn_num(hdev, LE_LINK) == 0) + return true; + + /* Check le_states if there is any connection in peripheral role. */ + if (hdev->conn_hash.le_num_peripheral > 0) { + /* Peripheral connection state and non connectable mode + * bit 20. + */ + if (!connectable && !(hdev->le_states[2] & 0x10)) + return false; + + /* Peripheral connection state and connectable mode bit 38 + * and scannable bit 21. + */ + if (connectable && (!(hdev->le_states[4] & 0x40) || + !(hdev->le_states[2] & 0x20))) + return false; + } + + /* Check le_states if there is any connection in central role. */ + if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { + /* Central connection state and non connectable mode bit 18. */ + if (!connectable && !(hdev->le_states[2] & 0x02)) + return false; + + /* Central connection state and connectable mode bit 35 and + * scannable 19. + */ + if (connectable && (!(hdev->le_states[4] & 0x08) || + !(hdev->le_states[2] & 0x08))) + return false; + } + + return true; +} + +static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) +{ + /* If privacy is not enabled don't use RPA */ + if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) + return false; + + /* If basic privacy mode is enabled use RPA */ + if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) + return true; + + /* If limited privacy mode is enabled don't use RPA if we're + * both discoverable and bondable. + */ + if ((flags & MGMT_ADV_FLAG_DISCOV) && + hci_dev_test_flag(hdev, HCI_BONDABLE)) + return false; + + /* We're neither bondable nor discoverable in the limited + * privacy mode, therefore use RPA. + */ + return true; +} + +static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) +{ + /* If we're advertising or initiating an LE connection we can't + * go ahead and change the random address at this time. This is + * because the eventual initiator address used for the + * subsequently created connection will be undefined (some + * controllers use the new address and others the one we had + * when the operation started). + * + * In this kind of scenario skip the update and let the random + * address be updated at the next cycle. + */ + if (hci_dev_test_flag(hdev, HCI_LE_ADV) || + hci_lookup_le_connect(hdev)) { + bt_dev_dbg(hdev, "Deferring random address update"); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + return 0; + } + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR, + 6, rpa, HCI_CMD_TIMEOUT); +} + +int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy, + bool rpa, u8 *own_addr_type) +{ + int err; + + /* If privacy is enabled use a resolvable private address. If + * current RPA has expired or there is something else than + * the current RPA in use, then generate a new one. 
+ */ + if (rpa) { + /* If Controller supports LL Privacy use own address type is + * 0x03 + */ + if (use_ll_privacy(hdev)) + *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; + else + *own_addr_type = ADDR_LE_DEV_RANDOM; + + /* Check if RPA is valid */ + if (rpa_valid(hdev)) + return 0; + + err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); + if (err < 0) { + bt_dev_err(hdev, "failed to generate new RPA"); + return err; + } + + err = hci_set_random_addr_sync(hdev, &hdev->rpa); + if (err) + return err; + + return 0; + } + + /* In case of required privacy without resolvable private address, + * use an non-resolvable private address. This is useful for active + * scanning and non-connectable advertising. + */ + if (require_privacy) { + bdaddr_t nrpa; + + while (true) { + /* The non-resolvable private address is generated + * from random six bytes with the two most significant + * bits cleared. + */ + get_random_bytes(&nrpa, 6); + nrpa.b[5] &= 0x3f; + + /* The non-resolvable private address shall not be + * equal to the public address. + */ + if (bacmp(&hdev->bdaddr, &nrpa)) + break; + } + + *own_addr_type = ADDR_LE_DEV_RANDOM; + + return hci_set_random_addr_sync(hdev, &nrpa); + } + + /* If forcing static address is in use or there is no public + * address use the static address as random address (but skip + * the HCI command if the current random address is already the + * static one. + * + * In case BR/EDR has been disabled on a dual-mode controller + * and a static address has been configured, then use that + * address instead of the public BR/EDR address. + */ + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || + !bacmp(&hdev->bdaddr, BDADDR_ANY) || + (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && + bacmp(&hdev->static_addr, BDADDR_ANY))) { + *own_addr_type = ADDR_LE_DEV_RANDOM; + if (bacmp(&hdev->static_addr, &hdev->random_addr)) + return hci_set_random_addr_sync(hdev, + &hdev->static_addr); + return 0; + } + + /* Neither privacy nor static address is being used so use a + * public address. + */ + *own_addr_type = ADDR_LE_DEV_PUBLIC; + + return 0; +} + +static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_ext_adv_enable *cp; + struct hci_cp_ext_adv_set *set; + u8 data[sizeof(*cp) + sizeof(*set) * 1]; + u8 size; + + /* If request specifies an instance that doesn't exist, fail */ + if (instance > 0) { + struct adv_info *adv; + + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -EINVAL; + + /* If not enabled there is nothing to do */ + if (!adv->enabled) + return 0; + } + + memset(data, 0, sizeof(data)); + + cp = (void *)data; + set = (void *)cp->data; + + /* Instance 0x00 indicates all advertising instances will be disabled */ + cp->num_of_sets = !!instance; + cp->enable = 0x00; + + set->handle = instance; + + size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, + size, data, HCI_CMD_TIMEOUT); +} + +static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance, + bdaddr_t *random_addr) +{ + struct hci_cp_le_set_adv_set_rand_addr cp; + int err; + + if (!instance) { + /* Instance 0x00 doesn't have an adv_info, instead it uses + * hdev->random_addr to track its address so whenever it needs + * to be updated this also set the random address since + * hdev->random_addr is shared with scan state machine. 
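For reference, the outcomes of hci_update_random_address_sync() above can be distilled into a small decision helper. This is an illustration extracted from the code, not a helper the patch adds, and it omits the dual-mode static-address special case for brevity:

static u8 pick_own_addr_type(struct hci_dev *hdev, bool require_privacy,
			     bool rpa)
{
	if (rpa)		/* resolvable private address */
		return use_ll_privacy(hdev) ? ADDR_LE_DEV_RANDOM_RESOLVED :
					      ADDR_LE_DEV_RANDOM;
	if (require_privacy)	/* non-resolvable private address */
		return ADDR_LE_DEV_RANDOM;
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY))
		return ADDR_LE_DEV_RANDOM;	/* static random address */
	return ADDR_LE_DEV_PUBLIC;		/* public address */
}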
+ */ + err = hci_set_random_addr_sync(hdev, random_addr); + if (err) + return err; + } + + memset(&cp, 0, sizeof(cp)); + + cp.handle = instance; + bacpy(&cp.bdaddr, random_addr); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_ext_adv_params cp; + bool connectable; + u32 flags; + bdaddr_t random_addr; + u8 own_addr_type; + int err; + struct adv_info *adv; + bool secondary_adv; + + if (instance > 0) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -EINVAL; + } else { + adv = NULL; + } + + /* Updating parameters of an active instance will return a + * Command Disallowed error, so we must first disable the + * instance if it is active. + */ + if (adv && !adv->pending) { + err = hci_disable_ext_adv_instance_sync(hdev, instance); + if (err) + return err; + } + + flags = hci_adv_instance_flags(hdev, instance); + + /* If the "connectable" instance flag was not set, then choose between + * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. + */ + connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || + mgmt_get_connectable(hdev); + + if (!is_advertising_allowed(hdev, connectable)) + return -EPERM; + + /* Set require_privacy to true only when non-connectable + * advertising is used. In that case it is fine to use a + * non-resolvable private address. + */ + err = hci_get_random_address(hdev, !connectable, + adv_use_rpa(hdev, flags), adv, + &own_addr_type, &random_addr); + if (err < 0) + return err; + + memset(&cp, 0, sizeof(cp)); + + if (adv) { + hci_cpu_to_le24(adv->min_interval, cp.min_interval); + hci_cpu_to_le24(adv->max_interval, cp.max_interval); + cp.tx_power = adv->tx_power; + } else { + hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); + hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); + cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; + } + + secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); + + if (connectable) { + if (secondary_adv) + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); + else + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); + } else if (hci_adv_instance_is_scannable(hdev, instance) || + (flags & MGMT_ADV_PARAM_SCAN_RSP)) { + if (secondary_adv) + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); + else + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); + } else { + if (secondary_adv) + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); + else + cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); + } + + /* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter + * contains the peer’s Identity Address and the Peer_Address_Type + * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01). + * These parameters are used to locate the corresponding local IRK in + * the resolving list; this IRK is used to generate their own address + * used in the advertisement. 
+ */ + if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) + hci_copy_identity_address(hdev, &cp.peer_addr, + &cp.peer_addr_type); + + cp.own_addr_type = own_addr_type; + cp.channel_map = hdev->le_adv_channel_map; + cp.handle = instance; + + if (flags & MGMT_ADV_FLAG_SEC_2M) { + cp.primary_phy = HCI_ADV_PHY_1M; + cp.secondary_phy = HCI_ADV_PHY_2M; + } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { + cp.primary_phy = HCI_ADV_PHY_CODED; + cp.secondary_phy = HCI_ADV_PHY_CODED; + } else { + /* In all other cases use 1M */ + cp.primary_phy = HCI_ADV_PHY_1M; + cp.secondary_phy = HCI_ADV_PHY_1M; + } + + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (err) + return err; + + if ((own_addr_type == ADDR_LE_DEV_RANDOM || + own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) && + bacmp(&random_addr, BDADDR_ANY)) { + /* Check if random address need to be updated */ + if (adv) { + if (!bacmp(&random_addr, &adv->random_addr)) + return 0; + } else { + if (!bacmp(&random_addr, &hdev->random_addr)) + return 0; + } + + return hci_set_adv_set_random_addr_sync(hdev, instance, + &random_addr); + } + + return 0; +} + +static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct { + struct hci_cp_le_set_ext_scan_rsp_data cp; + u8 data[HCI_MAX_EXT_AD_LENGTH]; + } pdu; + u8 len; + + memset(&pdu, 0, sizeof(pdu)); + + len = eir_create_scan_rsp(hdev, instance, pdu.data); + + if (hdev->scan_rsp_data_len == len && + !memcmp(pdu.data, hdev->scan_rsp_data, len)) + return 0; + + memcpy(hdev->scan_rsp_data, pdu.data, len); + hdev->scan_rsp_data_len = len; + + pdu.cp.handle = instance; + pdu.cp.length = len; + pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; + pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, + sizeof(pdu.cp) + len, &pdu.cp, + HCI_CMD_TIMEOUT); +} + +static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_scan_rsp_data cp; + u8 len; + + memset(&cp, 0, sizeof(cp)); + + len = eir_create_scan_rsp(hdev, instance, cp.data); + + if (hdev->scan_rsp_data_len == len && + !memcmp(cp.data, hdev->scan_rsp_data, len)) + return 0; + + memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); + hdev->scan_rsp_data_len = len; + + cp.length = len; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) +{ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + if (ext_adv_capable(hdev)) + return hci_set_ext_scan_rsp_data_sync(hdev, instance); + + return __hci_set_scan_rsp_data_sync(hdev, instance); +} + +int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_ext_adv_enable *cp; + struct hci_cp_ext_adv_set *set; + u8 data[sizeof(*cp) + sizeof(*set) * 1]; + struct adv_info *adv; + + if (instance > 0) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -EINVAL; + /* If already enabled there is nothing to do */ + if (adv->enabled) + return 0; + } else { + adv = NULL; + } + + cp = (void *)data; + set = (void *)cp->data; + + memset(cp, 0, sizeof(*cp)); + + cp->enable = 0x01; + cp->num_of_sets = 0x01; + + memset(set, 0, sizeof(*set)); + + set->handle = instance; + + /* Set duration per instance since controller is responsible for + * scheduling it. 
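The conversion performed just below is worth spelling out: the duration field of LE Set Extended Advertising Enable is expressed in 10 ms units, while adv->timeout is kept in seconds. A small worked sketch mirroring that arithmetic (illustration only; like the code below it assumes the timeout in milliseconds fits in a u16):

static __le16 adv_timeout_to_duration(u16 timeout_sec)
{
	u16 duration_ms = timeout_sec * MSEC_PER_SEC;	/* e.g. 5 s -> 5000 ms */

	return cpu_to_le16(duration_ms / 10);		/* 5000 ms -> 500 units */
}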
+ */ + if (adv && adv->timeout) { + u16 duration = adv->timeout * MSEC_PER_SEC; + + /* Time = N * 10 ms */ + set->duration = cpu_to_le16(duration / 10); + } + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, + sizeof(*cp) + + sizeof(*set) * cp->num_of_sets, + data, HCI_CMD_TIMEOUT); +} + +int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance) +{ + int err; + + err = hci_setup_ext_adv_instance_sync(hdev, instance); + if (err) + return err; + + err = hci_set_ext_scan_rsp_data_sync(hdev, instance); + if (err) + return err; + + return hci_enable_ext_advertising_sync(hdev, instance); +} + +static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance) +{ + int err; + + if (ext_adv_capable(hdev)) + return hci_start_ext_adv_sync(hdev, instance); + + err = hci_update_adv_data_sync(hdev, instance); + if (err) + return err; + + err = hci_update_scan_rsp_data_sync(hdev, instance); + if (err) + return err; + + return hci_enable_advertising_sync(hdev); +} + +int hci_enable_advertising_sync(struct hci_dev *hdev) +{ + struct adv_info *adv_instance; + struct hci_cp_le_set_adv_param cp; + u8 own_addr_type, enable = 0x01; + bool connectable; + u16 adv_min_interval, adv_max_interval; + u32 flags; + u8 status; + + if (ext_adv_capable(hdev)) + return hci_enable_ext_advertising_sync(hdev, + hdev->cur_adv_instance); + + flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); + adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); + + /* If the "connectable" instance flag was not set, then choose between + * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. + */ + connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || + mgmt_get_connectable(hdev); + + if (!is_advertising_allowed(hdev, connectable)) + return -EINVAL; + + status = hci_disable_advertising_sync(hdev); + if (status) + return status; + + /* Clear the HCI_LE_ADV bit temporarily so that the + * hci_update_random_address knows that it's safe to go ahead + * and write a new random address. The flag will be set back on + * as soon as the SET_ADV_ENABLE HCI command completes. + */ + hci_dev_clear_flag(hdev, HCI_LE_ADV); + + /* Set require_privacy to true only when non-connectable + * advertising is used. In that case it is fine to use a + * non-resolvable private address. 
+ */ + status = hci_update_random_address_sync(hdev, !connectable, + adv_use_rpa(hdev, flags), + &own_addr_type); + if (status) + return status; + + memset(&cp, 0, sizeof(cp)); + + if (adv_instance) { + adv_min_interval = adv_instance->min_interval; + adv_max_interval = adv_instance->max_interval; + } else { + adv_min_interval = hdev->le_adv_min_interval; + adv_max_interval = hdev->le_adv_max_interval; + } + + if (connectable) { + cp.type = LE_ADV_IND; + } else { + if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance)) + cp.type = LE_ADV_SCAN_IND; + else + cp.type = LE_ADV_NONCONN_IND; + + if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || + hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { + adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; + adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; + } + } + + cp.min_interval = cpu_to_le16(adv_min_interval); + cp.max_interval = cpu_to_le16(adv_max_interval); + cp.own_address_type = own_addr_type; + cp.channel_map = hdev->le_adv_channel_map; + + status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (status) + return status; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, + sizeof(enable), &enable, HCI_CMD_TIMEOUT); +} + +static int enable_advertising_sync(struct hci_dev *hdev, void *data) +{ + return hci_enable_advertising_sync(hdev); +} + +int hci_enable_advertising(struct hci_dev *hdev) +{ + if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && + list_empty(&hdev->adv_instances)) + return 0; + + return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL); +} + +int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, + struct sock *sk) +{ + int err; + + if (!ext_adv_capable(hdev)) + return 0; + + err = hci_disable_ext_adv_instance_sync(hdev, instance); + if (err) + return err; + + /* If request specifies an instance that doesn't exist, fail */ + if (instance > 0 && !hci_find_adv_instance(hdev, instance)) + return -EINVAL; + + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET, + sizeof(instance), &instance, 0, + HCI_CMD_TIMEOUT, sk); +} + +static void cancel_adv_timeout(struct hci_dev *hdev) +{ + if (hdev->adv_instance_timeout) { + hdev->adv_instance_timeout = 0; + cancel_delayed_work(&hdev->adv_instance_expire); + } +} + +static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct { + struct hci_cp_le_set_ext_adv_data cp; + u8 data[HCI_MAX_EXT_AD_LENGTH]; + } pdu; + u8 len; + + memset(&pdu, 0, sizeof(pdu)); + + len = eir_create_adv_data(hdev, instance, pdu.data); + + /* There's nothing to do if the data hasn't changed */ + if (hdev->adv_data_len == len && + memcmp(pdu.data, hdev->adv_data, len) == 0) + return 0; + + memcpy(hdev->adv_data, pdu.data, len); + hdev->adv_data_len = len; + + pdu.cp.length = len; + pdu.cp.handle = instance; + pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; + pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, + sizeof(pdu.cp) + len, &pdu.cp, + HCI_CMD_TIMEOUT); +} + +static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_adv_data cp; + u8 len; + + memset(&cp, 0, sizeof(cp)); + + len = eir_create_adv_data(hdev, instance, cp.data); + + /* There's nothing to do if the data hasn't changed */ + if (hdev->adv_data_len == len && + memcmp(cp.data, hdev->adv_data, len) == 0) + return 0; + + memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); + hdev->adv_data_len = len; + + cp.length = len; + + return 
__hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + if (ext_adv_capable(hdev)) + return hci_set_ext_adv_data_sync(hdev, instance); + + return hci_set_adv_data_sync(hdev, instance); +} + +int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, + bool force) +{ + struct adv_info *adv = NULL; + u16 timeout; + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev)) + return -EPERM; + + if (hdev->adv_instance_timeout) + return -EBUSY; + + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -ENOENT; + + /* A zero timeout means unlimited advertising. As long as there is + * only one instance, duration should be ignored. We still set a timeout + * in case further instances are being added later on. + * + * If the remaining lifetime of the instance is more than the duration + * then the timeout corresponds to the duration, otherwise it will be + * reduced to the remaining instance lifetime. + */ + if (adv->timeout == 0 || adv->duration <= adv->remaining_time) + timeout = adv->duration; + else + timeout = adv->remaining_time; + + /* The remaining time is being reduced unless the instance is being + * advertised without time limit. + */ + if (adv->timeout) + adv->remaining_time = adv->remaining_time - timeout; + + /* Only use work for scheduling instances with legacy advertising */ + if (!ext_adv_capable(hdev)) { + hdev->adv_instance_timeout = timeout; + queue_delayed_work(hdev->req_workqueue, + &hdev->adv_instance_expire, + msecs_to_jiffies(timeout * 1000)); + } + + /* If we're just re-scheduling the same instance again then do not + * execute any HCI commands. This happens when a single instance is + * being advertised. + */ + if (!force && hdev->cur_adv_instance == instance && + hci_dev_test_flag(hdev, HCI_LE_ADV)) + return 0; + + hdev->cur_adv_instance = instance; + + return hci_start_adv_sync(hdev, instance); +} + +static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk) +{ + int err; + + if (!ext_adv_capable(hdev)) + return 0; + + /* Disable instance 0x00 to disable all instances */ + err = hci_disable_ext_adv_instance_sync(hdev, 0x00); + if (err) + return err; + + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS, + 0, NULL, 0, HCI_CMD_TIMEOUT, sk); +} + +static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) +{ + struct adv_info *adv, *n; + + if (ext_adv_capable(hdev)) + /* Remove all existing sets */ + return hci_clear_adv_sets_sync(hdev, sk); + + /* This is safe as long as there is no command send while the lock is + * held. + */ + hci_dev_lock(hdev); + + /* Cleanup non-ext instances */ + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + u8 instance = adv->instance; + int err; + + if (!(force || adv->timeout)) + continue; + + err = hci_remove_adv_instance(hdev, instance); + if (!err) + mgmt_advertising_removed(sk, hdev, instance); + } + + hci_dev_unlock(hdev); + + return 0; +} + +static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance, + struct sock *sk) +{ + int err; + + /* If we use extended advertising, instance has to be removed first. */ + if (ext_adv_capable(hdev)) + return hci_remove_ext_adv_instance_sync(hdev, instance, sk); + + /* This is safe as long as there is no command send while the lock is + * held. 
+ */ + hci_dev_lock(hdev); + + err = hci_remove_adv_instance(hdev, instance); + if (!err) + mgmt_advertising_removed(sk, hdev, instance); + + hci_dev_unlock(hdev); + + return err; +} + +/* For a single instance: + * - force == true: The instance will be removed even when its remaining + * lifetime is not zero. + * - force == false: the instance will be deactivated but kept stored unless + * the remaining lifetime is zero. + * + * For instance == 0x00: + * - force == true: All instances will be removed regardless of their timeout + * setting. + * - force == false: Only instances that have a timeout will be removed. + */ +int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk, + u8 instance, bool force) +{ + struct adv_info *next = NULL; + int err; + + /* Cancel any timeout concerning the removed instance(s). */ + if (!instance || hdev->cur_adv_instance == instance) + cancel_adv_timeout(hdev); + + /* Get the next instance to advertise BEFORE we remove + * the current one. This can be the same instance again + * if there is only one instance. + */ + if (hdev->cur_adv_instance == instance) + next = hci_get_next_instance(hdev, instance); + + if (!instance) { + err = hci_clear_adv_sync(hdev, sk, force); + if (err) + return err; + } else { + struct adv_info *adv = hci_find_adv_instance(hdev, instance); + + if (force || (adv && adv->timeout && !adv->remaining_time)) { + /* Don't advertise a removed instance. */ + if (next && next->instance == instance) + next = NULL; + + err = hci_remove_adv_sync(hdev, instance, sk); + if (err) + return err; + } + } + + if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) + return 0; + + if (next && !ext_adv_capable(hdev)) + hci_schedule_adv_instance_sync(hdev, next->instance, false); + + return 0; +} + +int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_read_rssi cp; + + cp.handle = handle; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK, + sizeof(*cp), cp, HCI_CMD_TIMEOUT); +} + +int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type) +{ + struct hci_cp_read_tx_power cp; + + cp.handle = handle; + cp.type = type; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_disable_advertising_sync(struct hci_dev *hdev) +{ + u8 enable = 0x00; + + /* If controller is not advertising we are done. 
*/ + if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) + return 0; + + if (ext_adv_capable(hdev)) + return hci_disable_ext_adv_instance_sync(hdev, 0x00); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, + sizeof(enable), &enable, HCI_CMD_TIMEOUT); +} + +static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val, + u8 filter_dup) +{ + struct hci_cp_le_set_ext_scan_enable cp; + + memset(&cp, 0, sizeof(cp)); + cp.enable = val; + cp.filter_dup = filter_dup; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, + u8 filter_dup) +{ + struct hci_cp_le_set_scan_enable cp; + + if (use_ext_scan(hdev)) + return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup); + + memset(&cp, 0, sizeof(cp)); + cp.enable = val; + cp.filter_dup = filter_dup; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val) +{ + if (!use_ll_privacy(hdev)) + return 0; + + /* If controller is not/already resolving we are done. */ + if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, + sizeof(val), &val, HCI_CMD_TIMEOUT); +} + +static int hci_scan_disable_sync(struct hci_dev *hdev) +{ + int err; + + /* If controller is not scanning we are done. */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) + return 0; + + if (hdev->scanning_paused) { + bt_dev_dbg(hdev, "Scanning is paused for suspend"); + return 0; + } + + err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00); + if (err) { + bt_dev_err(hdev, "Unable to disable scanning: %d", err); + return err; + } + + return err; +} + +static bool scan_use_rpa(struct hci_dev *hdev) +{ + return hci_dev_test_flag(hdev, HCI_PRIVACY); +} + +static void hci_start_interleave_scan(struct hci_dev *hdev) +{ + hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; + queue_delayed_work(hdev->req_workqueue, + &hdev->interleave_scan, 0); +} + +static bool is_interleave_scanning(struct hci_dev *hdev) +{ + return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; +} + +static void cancel_interleave_scan(struct hci_dev *hdev) +{ + bt_dev_dbg(hdev, "cancelling interleave scan"); + + cancel_delayed_work_sync(&hdev->interleave_scan); + + hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; +} + +/* Return true if interleave_scan wasn't started until exiting this function, + * otherwise, return false + */ +static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev) +{ + /* Do interleaved scan only if all of the following are true: + * - There is at least one ADV monitor + * - At least one pending LE connection or one device to be scanned for + * - Monitor offloading is not supported + * If so, we should alternate between allowlist scan and one without + * any filters to save power. 
+ */
+ bool use_interleaving = hci_is_adv_monitoring(hdev) &&
+ !(list_empty(&hdev->pend_le_conns) &&
+ list_empty(&hdev->pend_le_reports)) &&
+ hci_get_adv_monitor_offload_ext(hdev) ==
+ HCI_ADV_MONITOR_EXT_NONE;
+ bool is_interleaving = is_interleave_scanning(hdev);
+
+ if (use_interleaving && !is_interleaving) {
+ hci_start_interleave_scan(hdev);
+ bt_dev_dbg(hdev, "starting interleave scan");
+ return true;
+ }
+
+ if (!use_interleaving && is_interleaving)
+ cancel_interleave_scan(hdev);
+
+ return false;
+}
+
+/* Removes connection to resolve list if needed. */
+static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+ struct hci_cp_le_del_from_resolv_list cp;
+ struct bdaddr_list_with_irk *entry;
+
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ /* Check if the IRK has been programmed */
+ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
+ bdaddr_type);
+ if (!entry)
+ return 0;
+
+ cp.bdaddr_type = bdaddr_type;
+ bacpy(&cp.bdaddr, bdaddr);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+ struct hci_cp_le_del_from_accept_list cp;
+ int err;
+
+ /* Check if device is on accept list before removing it */
+ if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
+ return 0;
+
+ cp.bdaddr_type = bdaddr_type;
+ bacpy(&cp.bdaddr, bdaddr);
+
+ /* Ignore errors when removing from resolving list as it is likely
+ * that the device was never added.
+ */
+ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
+ return err;
+ }
+
+ bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
+ cp.bdaddr_type);
+
+ return 0;
+}
+
+/* Adds connection to resolve list if needed.
+ * Setting params to NULL programs local hdev->irk
+ */
+static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
+ struct hci_conn_params *params)
+{
+ struct hci_cp_le_add_to_resolv_list cp;
+ struct smp_irk *irk;
+ struct bdaddr_list_with_irk *entry;
+
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ /* Attempt to program local identity address, type and irk if params is
+ * NULL.
+ */
+ if (!params) {
+ if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
+ return 0;
+
+ hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
+ memcpy(cp.peer_irk, hdev->irk, 16);
+ goto done;
+ }
+
+ irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
+ if (!irk)
+ return 0;
+
+ /* Check if the IRK has _not_ been programmed yet. */
+ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
+ &params->addr,
+ params->addr_type);
+ if (entry)
+ return 0;
+
+ cp.bdaddr_type = params->addr_type;
+ bacpy(&cp.bdaddr, &params->addr);
+ memcpy(cp.peer_irk, irk->val, 16);
+
+done:
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY))
+ memcpy(cp.local_irk, hdev->irk, 16);
+ else
+ memset(cp.local_irk, 0, 16);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* Adds connection to allow list if needed. If the device uses RPA (has IRK),
+ * this attempts to program the device in the resolving list as well.
+ */
+static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
+ struct hci_conn_params *params,
+ u8 *num_entries)
+{
+ struct hci_cp_le_add_to_accept_list cp;
+ int err;
+
+ /* Already in accept list */
+ if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
+ params->addr_type))
+ return 0;
+
+ /* Select filter policy to accept all advertising */
+ if (*num_entries >= hdev->le_accept_list_size)
+ return -ENOSPC;
+
+ /* Accept list cannot be used with RPAs */
+ if (!use_ll_privacy(hdev) &&
+ hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
+ return -EINVAL;
+ }
+
+ /* During suspend, only wakeable devices can be in acceptlist */
+ if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
+ params->current_flags))
+ return 0;
+
+ /* Attempt to program the device in the resolving list first to avoid
+ * having to roll back in case it fails; since the resolving list is
+ * dynamic, it can probably be smaller than the accept list.
+ */
+ err = hci_le_add_resolve_list_sync(hdev, params);
+ if (err) {
+ bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
+ return err;
+ }
+
+ *num_entries += 1;
+ cp.bdaddr_type = params->addr_type;
+ bacpy(&cp.bdaddr, &params->addr);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Unable to add to allow list: %d", err);
+ /* Rollback the device from the resolving list */
+ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
+ return err;
+ }
+
+ bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
+ cp.bdaddr_type);
+
+ return 0;
+}
+
+/* This function disables/pauses all advertising instances */
+static int hci_pause_advertising_sync(struct hci_dev *hdev)
+{
+ int err;
+ int old_state;
+
+ /* If there are no instances or advertising has already been paused
+ * there is nothing to do.
+ */
+ if (!hdev->adv_instance_cnt || hdev->advertising_paused)
+ return 0;
+
+ bt_dev_dbg(hdev, "Pausing directed advertising");
+
+ /* Stop directed advertising */
+ old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
+ if (old_state) {
+ /* When discoverable timeout triggers, then just make sure
+ * the limited discoverable flag is cleared. Even in the case
+ * of a timeout triggered from general discoverable, it is
+ * safe to unconditionally clear the flag.
+ */
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+ hdev->discov_timeout = 0;
+ }
+
+ bt_dev_dbg(hdev, "Pausing advertising instances");
+
+ /* Call to disable any advertisements active on the controller.
+ * This will succeed even if no advertisements are configured.
+ */
+ err = hci_disable_advertising_sync(hdev);
+ if (err)
+ return err;
+
+ /* If we are using software rotation, pause the loop */
+ if (!ext_adv_capable(hdev))
+ cancel_adv_timeout(hdev);
+
+ hdev->advertising_paused = true;
+ hdev->advertising_old_state = old_state;
+
+ return 0;
+}
+
+/* This function enables all user advertising instances */
+static int hci_resume_advertising_sync(struct hci_dev *hdev)
+{
+ struct adv_info *adv, *tmp;
+ int err;
+
+ /* If advertising has not been paused there is nothing to do.
+ */
+ if (!hdev->advertising_paused)
+ return 0;
+
+ /* Resume directed advertising */
+ hdev->advertising_paused = false;
+ if (hdev->advertising_old_state) {
+ hci_dev_set_flag(hdev, HCI_ADVERTISING);
+ hdev->advertising_old_state = 0;
+ }
+
+ bt_dev_dbg(hdev, "Resuming advertising instances");
+
+ if (ext_adv_capable(hdev)) {
+ /* Call for each tracked instance to be re-enabled */
+ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
+ err = hci_enable_ext_advertising_sync(hdev,
+ adv->instance);
+ if (!err)
+ continue;
+
+ /* If the instance cannot be resumed remove it */
+ hci_remove_ext_adv_instance_sync(hdev, adv->instance,
+ NULL);
+ }
+ } else {
+ /* Schedule for most recent instance to be restarted and begin
+ * the software rotation loop
+ */
+ err = hci_schedule_adv_instance_sync(hdev,
+ hdev->cur_adv_instance,
+ true);
+ }
+
+ hdev->advertising_paused = false;
+
+ return err;
+}
+
+struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
+ bool extended, struct sock *sk)
+{
+ u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
+ HCI_OP_READ_LOCAL_OOB_DATA;
+
+ return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
+}
+
+/* Device must not be scanning when updating the accept list.
+ *
+ * Update is done using the following sequence:
+ *
+ * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
+ * Remove Devices From Accept List ->
+ * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
+ * Add Devices to Accept List ->
+ * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
+ * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
+ * Enable Scanning
+ *
+ * In case of failure advertising shall be restored to its original state and
+ * the return value would disable the accept list since either the accept or
+ * the resolving list could not be programmed.
+ *
+ */
+static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+{
+ struct hci_conn_params *params;
+ struct bdaddr_list *b, *t;
+ u8 num_entries = 0;
+ bool pend_conn, pend_report;
+ int err;
+
+ /* Pause advertising if resolving list can be used, as controllers
+ * cannot accept resolving list modifications while advertising.
+ */
+ if (use_ll_privacy(hdev)) {
+ err = hci_pause_advertising_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "pause advertising failed: %d", err);
+ return 0x00;
+ }
+ }
+
+ /* Disable address resolution while reprogramming accept list since
+ * devices that do have an IRK will be programmed in the resolving list
+ * when LL Privacy is enabled.
+ */
+ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+ if (err) {
+ bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
+ goto done;
+ }
+
+ /* Go through the current accept list programmed into the
+ * controller one by one and check if that address is still
+ * in the list of pending connections or list of devices to
+ * report. If not present in either list, then remove it from
+ * the controller.
+ */
+ list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
+ pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
+ &b->bdaddr,
+ b->bdaddr_type);
+ pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
+ &b->bdaddr,
+ b->bdaddr_type);
+
+ /* If the device is not likely to connect or report,
+ * remove it from the acceptlist.
+ */
+ if (!pend_conn && !pend_report) {
+ hci_le_del_accept_list_sync(hdev, &b->bdaddr,
+ b->bdaddr_type);
+ continue;
+ }
+
+ num_entries++;
+ }
+
+ /* Since all no longer valid accept list entries have been
+ * removed, walk through the list of pending connections
+ * and ensure that any new device gets programmed into
+ * the controller.
+ *
+ * If the list of devices is larger than the list of
+ * available accept list entries in the controller, then
+ * just abort and return filter policy value to not use the
+ * accept list.
+ */
+ list_for_each_entry(params, &hdev->pend_le_conns, action) {
+ err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
+ if (err)
+ goto done;
+ }
+
+ /* After adding all new pending connections, walk through
+ * the list of pending reports and also add these to the
+ * accept list if there is still space. Abort if space runs out.
+ */
+ list_for_each_entry(params, &hdev->pend_le_reports, action) {
+ err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
+ if (err)
+ goto done;
+ }
+
+ /* Use the allowlist unless the following conditions are all true:
+ * - We are not currently suspending
+ * - There are 1 or more ADV monitors registered and it's not offloaded
+ * - Interleaved scanning is not currently using the allowlist
+ */
+ if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
+ hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
+ hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
+ err = -EINVAL;
+
+done:
+ /* Enable address resolution when LL Privacy is enabled. */
+ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
+ if (err)
+ bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
+
+ /* Resume advertising if it was paused */
+ if (use_ll_privacy(hdev))
+ hci_resume_advertising_sync(hdev);
+
+ /* Select filter policy to use accept list */
+ return err ?
0x00 : 0x01;
+}
+
+/* Returns true if an LE connection is in the scanning state */
+static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return false;
+}
+
+static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+ u16 interval, u16 window,
+ u8 own_addr_type, u8 filter_policy)
+{
+ struct hci_cp_le_set_ext_scan_params *cp;
+ struct hci_cp_le_scan_phy_params *phy;
+ u8 data[sizeof(*cp) + sizeof(*phy) * 2];
+ u8 num_phy = 0;
+
+ cp = (void *)data;
+ phy = (void *)cp->data;
+
+ memset(data, 0, sizeof(data));
+
+ cp->own_addr_type = own_addr_type;
+ cp->filter_policy = filter_policy;
+
+ if (scan_1m(hdev) || scan_2m(hdev)) {
+ cp->scanning_phys |= LE_SCAN_PHY_1M;
+
+ phy->type = type;
+ phy->interval = cpu_to_le16(interval);
+ phy->window = cpu_to_le16(window);
+
+ num_phy++;
+ phy++;
+ }
+
+ if (scan_coded(hdev)) {
+ cp->scanning_phys |= LE_SCAN_PHY_CODED;
+
+ phy->type = type;
+ phy->interval = cpu_to_le16(interval);
+ phy->window = cpu_to_le16(window);
+
+ num_phy++;
+ phy++;
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
+ sizeof(*cp) + sizeof(*phy) * num_phy,
+ data, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
+ u16 interval, u16 window,
+ u8 own_addr_type, u8 filter_policy)
+{
+ struct hci_cp_le_set_scan_param cp;
+
+ if (use_ext_scan(hdev))
+ return hci_le_set_ext_scan_param_sync(hdev, type, interval,
+ window, own_addr_type,
+ filter_policy);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.type = type;
+ cp.interval = cpu_to_le16(interval);
+ cp.window = cpu_to_le16(window);
+ cp.own_address_type = own_addr_type;
+ cp.filter_policy = filter_policy;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
+ u16 window, u8 own_addr_type, u8 filter_policy,
+ u8 filter_dup)
+{
+ int err;
+
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return 0;
+ }
+
+ err = hci_le_set_scan_param_sync(hdev, type, interval, window,
+ own_addr_type, filter_policy);
+ if (err)
+ return err;
+
+ return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
+}
+
+static int hci_passive_scan_sync(struct hci_dev *hdev)
+{
+ u8 own_addr_type;
+ u8 filter_policy;
+ u16 window, interval;
+ int err;
+
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return 0;
+ }
+
+ err = hci_scan_disable_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "disable scanning failed: %d", err);
+ return err;
+ }
+
+ /* Set require_privacy to false since no SCAN_REQ are sent
+ * during passive scanning. Not using a non-resolvable address
+ * here is important so that peer devices using direct
+ * advertising with our address will be correctly reported
+ * by the controller.
+ */
+ if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
+ &own_addr_type))
+ return 0;
+
+ if (hdev->enable_advmon_interleave_scan &&
+ hci_update_interleaved_scan_sync(hdev))
+ return 0;
+
+ bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
+
+ /* Adding or removing entries from the accept list must
+ * happen before enabling scanning. The controller does
+ * not allow accept list modification while scanning.
+ */
+ filter_policy = hci_update_accept_list_sync(hdev);
+
+ /* When the controller is using random resolvable addresses and
+ * thereby has LE privacy enabled, controllers with
+ * Extended Scanner Filter Policies support can now enable support
+ * for handling directed advertising.
+ *
+ * So instead of using filter policies 0x00 (no acceptlist)
+ * and 0x01 (acceptlist enabled) use the new filter policies
+ * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
+ */
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
+ (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
+ filter_policy |= 0x02;
+
+ if (hdev->suspended) {
+ window = hdev->le_scan_window_suspend;
+ interval = hdev->le_scan_int_suspend;
+ } else if (hci_is_le_conn_scanning(hdev)) {
+ window = hdev->le_scan_window_connect;
+ interval = hdev->le_scan_int_connect;
+ } else if (hci_is_adv_monitoring(hdev)) {
+ window = hdev->le_scan_window_adv_monitor;
+ interval = hdev->le_scan_int_adv_monitor;
+ } else {
+ window = hdev->le_scan_window;
+ interval = hdev->le_scan_interval;
+ }
+
+ bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
+
+ return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
+ own_addr_type, filter_policy,
+ LE_SCAN_FILTER_DUP_ENABLE);
+}
+
+/* This function controls the passive scanning based on hdev->pend_le_conns
+ * list. If there are pending LE connections we start the background scanning,
+ * otherwise we stop it in the following sequence:
+ *
+ * If there are devices to scan:
+ *
+ * Disable Scanning -> Update Accept List ->
+ * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
+ * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
+ * Enable Scanning
+ *
+ * Otherwise:
+ *
+ * Disable Scanning
+ */
+int hci_update_passive_scan_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ if (!test_bit(HCI_UP, &hdev->flags) ||
+ test_bit(HCI_INIT, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG) ||
+ hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
+ hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ return 0;
+
+ /* No point in doing scanning if LE support hasn't been enabled */
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return 0;
+
+ /* If discovery is active don't interfere with it */
+ if (hdev->discovery.state != DISCOVERY_STOPPED)
+ return 0;
+
+ /* Reset RSSI and UUID filters when starting background scanning
+ * since these filters are meant for service discovery only.
+ *
+ * The Start Discovery and Start Service Discovery operations
+ * ensure that proper values are set for the RSSI threshold and UUID
+ * filter list. So it is safe to just reset them here.
+ */
+ hci_discovery_filter_clear(hdev);
+
+ bt_dev_dbg(hdev, "ADV monitoring is %s",
+ hci_is_adv_monitoring(hdev) ? "on" : "off");
+
+ if (list_empty(&hdev->pend_le_conns) &&
+ list_empty(&hdev->pend_le_reports) &&
+ !hci_is_adv_monitoring(hdev)) {
+ /* If there are no pending LE connections or devices
+ * to be scanned for or no ADV monitors, we should stop the
+ * background scanning.
+ */ + + bt_dev_dbg(hdev, "stopping background scanning"); + + err = hci_scan_disable_sync(hdev); + if (err) + bt_dev_err(hdev, "stop background scanning failed: %d", + err); + } else { + /* If there is at least one pending LE connection, we should + * keep the background scan running. + */ + + /* If controller is connecting, we should not start scanning + * since some controllers are not able to scan and connect at + * the same time. + */ + if (hci_lookup_le_connect(hdev)) + return 0; + + bt_dev_dbg(hdev, "start background scanning"); + + err = hci_passive_scan_sync(hdev); + if (err) + bt_dev_err(hdev, "start background scanning failed: %d", + err); + } + + return err; +} + +static int update_passive_scan_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_passive_scan_sync(hdev); +} + +int hci_update_passive_scan(struct hci_dev *hdev) +{ + /* Only queue if it would have any effect */ + if (!test_bit(HCI_UP, &hdev->flags) || + test_bit(HCI_INIT, &hdev->flags) || + hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG) || + hci_dev_test_flag(hdev, HCI_AUTO_OFF) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + + return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL); +} + +int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) +{ + int err; + + if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev)) + return 0; + + err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, + sizeof(val), &val, HCI_CMD_TIMEOUT); + + if (!err) { + if (val) { + hdev->features[1][0] |= LMP_HOST_SC; + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + } else { + hdev->features[1][0] &= ~LMP_HOST_SC; + hci_dev_clear_flag(hdev, HCI_SC_ENABLED); + } + } + + return err; +} + +int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode) +{ + int err; + + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || + lmp_host_ssp_capable(hdev)) + return 0; + + if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { + __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); + } + + err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); + if (err) + return err; + + return hci_write_sc_support_sync(hdev, 0x01); +} + +int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul) +{ + struct hci_cp_write_le_host_supported cp; + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || + !lmp_bredr_capable(hdev)) + return 0; + + /* Check first if we already have the right host state + * (host features set) + */ + if (le == lmp_host_le_capable(hdev) && + simul == lmp_host_le_br_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + cp.le = le; + cp.simul = simul; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_powered_update_adv_sync(struct hci_dev *hdev) +{ + struct adv_info *adv, *tmp; + int err; + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + /* If RPA Resolution has not been enable yet it means the + * resolving list is empty and we should attempt to program the + * local IRK in order to support using own_addr_type + * ADDR_LE_DEV_RANDOM_RESOLVED (0x03). + */ + if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { + hci_le_add_resolve_list_sync(hdev, NULL); + hci_le_set_addr_resolution_enable_sync(hdev, 0x01); + } + + /* Make sure the controller has a good default for + * advertising data. This also applies to the case + * where BR/EDR was toggled during the AUTO_OFF phase. 
+ */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || + list_empty(&hdev->adv_instances)) { + if (ext_adv_capable(hdev)) { + err = hci_setup_ext_adv_instance_sync(hdev, 0x00); + if (!err) + hci_update_scan_rsp_data_sync(hdev, 0x00); + } else { + err = hci_update_adv_data_sync(hdev, 0x00); + if (!err) + hci_update_scan_rsp_data_sync(hdev, 0x00); + } + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) + hci_enable_advertising_sync(hdev); + } + + /* Call for each tracked instance to be scheduled */ + list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) + hci_schedule_adv_instance_sync(hdev, adv->instance, true); + + return 0; +} + +static int hci_write_auth_enable_sync(struct hci_dev *hdev) +{ + u8 link_sec; + + link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); + if (link_sec == test_bit(HCI_AUTH, &hdev->flags)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, + sizeof(link_sec), &link_sec, + HCI_CMD_TIMEOUT); +} + +int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable) +{ + struct hci_cp_write_page_scan_activity cp; + u8 type; + int err = 0; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + if (hdev->hci_ver < BLUETOOTH_VER_1_2) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (enable) { + type = PAGE_SCAN_TYPE_INTERLACED; + + /* 160 msec page scan interval */ + cp.interval = cpu_to_le16(0x0100); + } else { + type = hdev->def_page_scan_type; + cp.interval = cpu_to_le16(hdev->def_page_scan_int); + } + + cp.window = cpu_to_le16(hdev->def_page_scan_window); + + if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval || + __cpu_to_le16(hdev->page_scan_window) != cp.window) { + err = __hci_cmd_sync_status(hdev, + HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (err) + return err; + } + + if (hdev->page_scan_type != type) + err = __hci_cmd_sync_status(hdev, + HCI_OP_WRITE_PAGE_SCAN_TYPE, + sizeof(type), &type, + HCI_CMD_TIMEOUT); + + return err; +} + +static bool disconnected_accept_list_entries(struct hci_dev *hdev) +{ + struct bdaddr_list *b; + + list_for_each_entry(b, &hdev->accept_list, list) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); + if (!conn) + return true; + + if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) + return true; + } + + return false; +} + +static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, + sizeof(val), &val, + HCI_CMD_TIMEOUT); +} + +int hci_update_scan_sync(struct hci_dev *hdev) +{ + u8 scan; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + if (!hdev_is_powered(hdev)) + return 0; + + if (mgmt_powering_down(hdev)) + return 0; + + if (hdev->scanning_paused) + return 0; + + if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || + disconnected_accept_list_entries(hdev)) + scan = SCAN_PAGE; + else + scan = SCAN_DISABLED; + + if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + scan |= SCAN_INQUIRY; + + if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && + test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) + return 0; + + return hci_write_scan_enable_sync(hdev, scan); +} + +int hci_update_name_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_local_name cp; + + memset(&cp, 0, sizeof(cp)); + + memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, + sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +/* This function perform powered update HCI 
command sequence after the HCI init + * sequence which end up resetting all states, the sequence is as follows: + * + * HCI_SSP_ENABLED(Enable SSP) + * HCI_LE_ENABLED(Enable LE) + * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) -> + * Update adv data) + * Enable Authentication + * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class -> + * Set Name -> Set EIR) + */ +int hci_powered_update_sync(struct hci_dev *hdev) +{ + int err; + + /* Register the available SMP channels (BR/EDR and LE) only when + * successfully powering on the controller. This late + * registration is required so that LE SMP can clearly decide if + * the public address or static address is used. + */ + smp_register(hdev); + + err = hci_write_ssp_mode_sync(hdev, 0x01); + if (err) + return err; + + err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00); + if (err) + return err; + + err = hci_powered_update_adv_sync(hdev); + if (err) + return err; + + err = hci_write_auth_enable_sync(hdev); + if (err) + return err; + + if (lmp_bredr_capable(hdev)) { + if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) + hci_write_fast_connectable_sync(hdev, true); + else + hci_write_fast_connectable_sync(hdev, false); + hci_update_scan_sync(hdev); + hci_update_class_sync(hdev); + hci_update_name_sync(hdev); + hci_update_eir_sync(hdev); + } + + return 0; +} + +/** + * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address + * (BD_ADDR) for a HCI device from + * a firmware node property. + * @hdev: The HCI device + * + * Search the firmware node for 'local-bd-address'. + * + * All-zero BD addresses are rejected, because those could be properties + * that exist in the firmware tables, but were not updated by the firmware. For + * example, the DTS could define 'local-bd-address', with zero BD addresses. 
+ */ +static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) +{ + struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); + bdaddr_t ba; + int ret; + + ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", + (u8 *)&ba, sizeof(ba)); + if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) + return; + + bacpy(&hdev->public_addr, &ba); +} + +struct hci_init_stage { + int (*func)(struct hci_dev *hdev); +}; + +/* Run init stage NULL terminated function table */ +static int hci_init_stage_sync(struct hci_dev *hdev, + const struct hci_init_stage *stage) +{ + size_t i; + + for (i = 0; stage[i].func; i++) { + int err; + + err = stage[i].func(hdev); + if (err) + return err; + } + + return 0; +} + +/* Read Local Version */ +static int hci_read_local_version_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read BD Address */ +static int hci_read_bd_addr_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR, + 0, NULL, HCI_CMD_TIMEOUT); +} + +#define HCI_INIT(_func) \ +{ \ + .func = _func, \ +} + +static const struct hci_init_stage hci_init0[] = { + /* HCI_OP_READ_LOCAL_VERSION */ + HCI_INIT(hci_read_local_version_sync), + /* HCI_OP_READ_BD_ADDR */ + HCI_INIT(hci_read_bd_addr_sync), + {} +}; + +int hci_reset_sync(struct hci_dev *hdev) +{ + int err; + + set_bit(HCI_RESET, &hdev->flags); + + err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, + HCI_CMD_TIMEOUT); + if (err) + return err; + + return 0; +} + +static int hci_init0_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + /* Reset */ + if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + err = hci_reset_sync(hdev); + if (err) + return err; + } + + return hci_init_stage_sync(hdev, hci_init0); +} + +static int hci_unconf_init_sync(struct hci_dev *hdev) +{ + int err; + + if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + return 0; + + err = hci_init0_sync(hdev); + if (err < 0) + return err; + + if (hci_dev_test_flag(hdev, HCI_SETUP)) + hci_debugfs_create_basic(hdev); + + return 0; +} + +/* Read Local Supported Features. */ +static int hci_read_local_features_sync(struct hci_dev *hdev) +{ + /* Not all AMP controllers support this command */ + if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* BR Controller init stage 1 command sequence */ +static const struct hci_init_stage br_init1[] = { + /* HCI_OP_READ_LOCAL_FEATURES */ + HCI_INIT(hci_read_local_features_sync), + /* HCI_OP_READ_LOCAL_VERSION */ + HCI_INIT(hci_read_local_version_sync), + /* HCI_OP_READ_BD_ADDR */ + HCI_INIT(hci_read_bd_addr_sync), + {} +}; + +/* Read Local Commands */ +static int hci_read_local_cmds_sync(struct hci_dev *hdev) +{ + /* All Bluetooth 1.2 and later controllers should support the + * HCI command for reading the local supported commands. + * + * Unfortunately some controllers indicate Bluetooth 1.2 support, + * but do not have support for this command. If that is the case, + * the driver can quirk the behavior and skip reading the local + * supported commands. 
+ */
+ if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
+ !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
+ 0, NULL, HCI_CMD_TIMEOUT);
+
+ return 0;
+}
+
+/* Read Local AMP Info */
+static int hci_read_local_amp_info_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Data Blk size */
+static int hci_read_data_block_size_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Flow Control Mode */
+static int hci_read_flow_control_mode_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Location Data */
+static int hci_read_location_data_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* AMP Controller init stage 1 command sequence */
+static const struct hci_init_stage amp_init1[] = {
+ /* HCI_OP_READ_LOCAL_VERSION */
+ HCI_INIT(hci_read_local_version_sync),
+ /* HCI_OP_READ_LOCAL_COMMANDS */
+ HCI_INIT(hci_read_local_cmds_sync),
+ /* HCI_OP_READ_LOCAL_AMP_INFO */
+ HCI_INIT(hci_read_local_amp_info_sync),
+ /* HCI_OP_READ_DATA_BLOCK_SIZE */
+ HCI_INIT(hci_read_data_block_size_sync),
+ /* HCI_OP_READ_FLOW_CONTROL_MODE */
+ HCI_INIT(hci_read_flow_control_mode_sync),
+ /* HCI_OP_READ_LOCATION_DATA */
+ HCI_INIT(hci_read_location_data_sync),
+ {}
+};
+
+static int hci_init1_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ /* Reset */
+ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
+ err = hci_reset_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ switch (hdev->dev_type) {
+ case HCI_PRIMARY:
+ hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
+ return hci_init_stage_sync(hdev, br_init1);
+ case HCI_AMP:
+ hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
+ return hci_init_stage_sync(hdev, amp_init1);
+ default:
+ bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
+ break;
+ }
+
+ return 0;
+}
+
+/* AMP Controller init stage 2 command sequence */
+static const struct hci_init_stage amp_init2[] = {
+ /* HCI_OP_READ_LOCAL_FEATURES */
+ HCI_INIT(hci_read_local_features_sync),
+ {}
+};
+
+/* Read Buffer Size (ACL mtu, max pkt, etc.)
+ */
+static int hci_read_buffer_size_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Class of Device */
+static int hci_read_dev_class_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Local Name */
+static int hci_read_local_name_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Voice Setting */
+static int hci_read_voice_setting_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Number of Supported IAC */
+static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Current IAC LAP */
+static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
+ u8 cond_type, bdaddr_t *bdaddr,
+ u8 auto_accept)
+{
+ struct hci_cp_set_event_filter cp;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.flt_type = flt_type;
+
+ if (flt_type != HCI_FLT_CLEAR_ALL) {
+ cp.cond_type = cond_type;
+ bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
+ cp.addr_conn_flt.auto_accept = auto_accept;
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
+ flt_type == HCI_FLT_CLEAR_ALL ?
+ sizeof(cp.flt_type) : sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int hci_clear_event_filter_sync(struct hci_dev *hdev)
+{
+ if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
+ return 0;
+
+ return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
+ BDADDR_ANY, 0x00);
+}
+
+/* Connection accept timeout ~20 secs */
+static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
+{
+ __le16 param = cpu_to_le16(0x7d00);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
+ sizeof(param), &param, HCI_CMD_TIMEOUT);
+}
+
+/* BR Controller init stage 2 command sequence */
+static const struct hci_init_stage br_init2[] = {
+ /* HCI_OP_READ_BUFFER_SIZE */
+ HCI_INIT(hci_read_buffer_size_sync),
+ /* HCI_OP_READ_CLASS_OF_DEV */
+ HCI_INIT(hci_read_dev_class_sync),
+ /* HCI_OP_READ_LOCAL_NAME */
+ HCI_INIT(hci_read_local_name_sync),
+ /* HCI_OP_READ_VOICE_SETTING */
+ HCI_INIT(hci_read_voice_setting_sync),
+ /* HCI_OP_READ_NUM_SUPPORTED_IAC */
+ HCI_INIT(hci_read_num_supported_iac_sync),
+ /* HCI_OP_READ_CURRENT_IAC_LAP */
+ HCI_INIT(hci_read_current_iac_lap_sync),
+ /* HCI_OP_SET_EVENT_FLT */
+ HCI_INIT(hci_clear_event_filter_sync),
+ /* HCI_OP_WRITE_CA_TIMEOUT */
+ HCI_INIT(hci_write_ca_timeout_sync),
+ {}
+};
+
+static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
+{
+ u8 mode = 0x01;
+
+ if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ return 0;
+
+ /* When SSP is available, then the host features page
+ * should also be available as well. However some
+ * controllers list the max_page as 0 as long as SSP
+ * has not been enabled. To achieve proper debugging
+ * output, force the minimum max_page to 1 at least.
+ */ + hdev->max_page = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); +} + +static int hci_write_eir_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_eir cp; + + if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return 0; + + memset(hdev->eir, 0, sizeof(hdev->eir)); + memset(&cp, 0, sizeof(cp)); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +static int hci_write_inquiry_mode_sync(struct hci_dev *hdev) +{ + u8 mode; + + if (!lmp_inq_rssi_capable(hdev) && + !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) + return 0; + + /* If Extended Inquiry Result events are supported, then + * they are clearly preferred over Inquiry Result with RSSI + * events. + */ + mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); +} + +static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev) +{ + if (!lmp_inq_tx_pwr_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page) +{ + struct hci_cp_read_local_ext_features cp; + + if (!lmp_ext_feat_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.page = page; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev) +{ + return hci_read_local_ext_features_sync(hdev, 0x01); +} + +/* HCI Controller init stage 2 command sequence */ +static const struct hci_init_stage hci_init2[] = { + /* HCI_OP_READ_LOCAL_COMMANDS */ + HCI_INIT(hci_read_local_cmds_sync), + /* HCI_OP_WRITE_SSP_MODE */ + HCI_INIT(hci_write_ssp_mode_1_sync), + /* HCI_OP_WRITE_EIR */ + HCI_INIT(hci_write_eir_sync), + /* HCI_OP_WRITE_INQUIRY_MODE */ + HCI_INIT(hci_write_inquiry_mode_sync), + /* HCI_OP_READ_INQ_RSP_TX_POWER */ + HCI_INIT(hci_read_inq_rsp_tx_power_sync), + /* HCI_OP_READ_LOCAL_EXT_FEATURES */ + HCI_INIT(hci_read_local_ext_features_1_sync), + /* HCI_OP_WRITE_AUTH_ENABLE */ + HCI_INIT(hci_write_auth_enable_sync), + {} +}; + +/* Read LE Buffer Size */ +static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read LE Local Supported Features */ +static int hci_le_read_local_features_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read LE Supported States */ +static int hci_le_read_supported_states_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* LE Controller init stage 2 command sequence */ +static const struct hci_init_stage le_init2[] = { + /* HCI_OP_LE_READ_BUFFER_SIZE */ + HCI_INIT(hci_le_read_buffer_size_sync), + /* HCI_OP_LE_READ_LOCAL_FEATURES */ + HCI_INIT(hci_le_read_local_features_sync), + /* HCI_OP_LE_READ_SUPPORTED_STATES */ + HCI_INIT(hci_le_read_supported_states_sync), + {} +}; + +static int hci_init2_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + if (hdev->dev_type == HCI_AMP) + return hci_init_stage_sync(hdev, amp_init2); + + if (lmp_bredr_capable(hdev)) { + err = hci_init_stage_sync(hdev, br_init2); + if (err) + return err; + } else { + 
hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); + } + + if (lmp_le_capable(hdev)) { + err = hci_init_stage_sync(hdev, le_init2); + if (err) + return err; + /* LE-only controllers have LE implicitly enabled */ + if (!lmp_bredr_capable(hdev)) + hci_dev_set_flag(hdev, HCI_LE_ENABLED); + } + + return hci_init_stage_sync(hdev, hci_init2); +} + +static int hci_set_event_mask_sync(struct hci_dev *hdev) +{ + /* The second byte is 0xff instead of 0x9f (two reserved bits + * disabled) since a Broadcom 1.2 dongle doesn't respond to the + * command otherwise. + */ + u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; + + /* CSR 1.1 dongles does not accept any bitfield so don't try to set + * any event mask for pre 1.2 devices. + */ + if (hdev->hci_ver < BLUETOOTH_VER_1_2) + return 0; + + if (lmp_bredr_capable(hdev)) { + events[4] |= 0x01; /* Flow Specification Complete */ + + /* Don't set Disconnect Complete when suspended as that + * would wakeup the host when disconnecting due to + * suspend. + */ + if (hdev->suspended) + events[0] &= 0xef; + } else { + /* Use a different default for LE-only devices */ + memset(events, 0, sizeof(events)); + events[1] |= 0x20; /* Command Complete */ + events[1] |= 0x40; /* Command Status */ + events[1] |= 0x80; /* Hardware Error */ + + /* If the controller supports the Disconnect command, enable + * the corresponding event. In addition enable packet flow + * control related events. + */ + if (hdev->commands[0] & 0x20) { + /* Don't set Disconnect Complete when suspended as that + * would wakeup the host when disconnecting due to + * suspend. + */ + if (!hdev->suspended) + events[0] |= 0x10; /* Disconnection Complete */ + events[2] |= 0x04; /* Number of Completed Packets */ + events[3] |= 0x02; /* Data Buffer Overflow */ + } + + /* If the controller supports the Read Remote Version + * Information command, enable the corresponding event. 
+ */ + if (hdev->commands[2] & 0x80) + events[1] |= 0x08; /* Read Remote Version Information + * Complete + */ + + if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { + events[0] |= 0x80; /* Encryption Change */ + events[5] |= 0x80; /* Encryption Key Refresh Complete */ + } + } + + if (lmp_inq_rssi_capable(hdev) || + test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) + events[4] |= 0x02; /* Inquiry Result with RSSI */ + + if (lmp_ext_feat_capable(hdev)) + events[4] |= 0x04; /* Read Remote Extended Features Complete */ + + if (lmp_esco_capable(hdev)) { + events[5] |= 0x08; /* Synchronous Connection Complete */ + events[5] |= 0x10; /* Synchronous Connection Changed */ + } + + if (lmp_sniffsubr_capable(hdev)) + events[5] |= 0x20; /* Sniff Subrating */ + + if (lmp_pause_enc_capable(hdev)) + events[5] |= 0x80; /* Encryption Key Refresh Complete */ + + if (lmp_ext_inq_capable(hdev)) + events[5] |= 0x40; /* Extended Inquiry Result */ + + if (lmp_no_flush_capable(hdev)) + events[7] |= 0x01; /* Enhanced Flush Complete */ + + if (lmp_lsto_capable(hdev)) + events[6] |= 0x80; /* Link Supervision Timeout Changed */ + + if (lmp_ssp_capable(hdev)) { + events[6] |= 0x01; /* IO Capability Request */ + events[6] |= 0x02; /* IO Capability Response */ + events[6] |= 0x04; /* User Confirmation Request */ + events[6] |= 0x08; /* User Passkey Request */ + events[6] |= 0x10; /* Remote OOB Data Request */ + events[6] |= 0x20; /* Simple Pairing Complete */ + events[7] |= 0x04; /* User Passkey Notification */ + events[7] |= 0x08; /* Keypress Notification */ + events[7] |= 0x10; /* Remote Host Supported + * Features Notification + */ + } + + if (lmp_le_capable(hdev)) + events[7] |= 0x20; /* LE Meta-Event */ + + return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK, + sizeof(events), events, HCI_CMD_TIMEOUT); +} + +static int hci_read_stored_link_key_sync(struct hci_dev *hdev) +{ + struct hci_cp_read_stored_link_key cp; + + if (!(hdev->commands[6] & 0x20) || + test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) + return 0; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, BDADDR_ANY); + cp.read_all = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_setup_link_policy_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_def_link_policy cp; + u16 link_policy = 0; + + if (!(hdev->commands[5] & 0x10)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (lmp_rswitch_capable(hdev)) + link_policy |= HCI_LP_RSWITCH; + if (lmp_hold_capable(hdev)) + link_policy |= HCI_LP_HOLD; + if (lmp_sniff_capable(hdev)) + link_policy |= HCI_LP_SNIFF; + if (lmp_park_capable(hdev)) + link_policy |= HCI_LP_PARK; + + cp.policy = cpu_to_le16(link_policy); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[8] & 0x01)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[18] & 0x04) || + test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_read_page_scan_type_sync(struct hci_dev *hdev) +{ + /* Some older Broadcom based Bluetooth 1.2 controllers do not + * support the Read Page Scan Type command. 
Check support for + * this command in the bit mask of supported commands. + */ + if (!(hdev->commands[13] & 0x01)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read features beyond page 1 if available */ +static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev) +{ + u8 page; + int err; + + if (!lmp_ext_feat_capable(hdev)) + return 0; + + for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; + page++) { + err = hci_read_local_ext_features_sync(hdev, page); + if (err) + return err; + } + + return 0; +} + +/* HCI Controller init stage 3 command sequence */ +static const struct hci_init_stage hci_init3[] = { + /* HCI_OP_SET_EVENT_MASK */ + HCI_INIT(hci_set_event_mask_sync), + /* HCI_OP_READ_STORED_LINK_KEY */ + HCI_INIT(hci_read_stored_link_key_sync), + /* HCI_OP_WRITE_DEF_LINK_POLICY */ + HCI_INIT(hci_setup_link_policy_sync), + /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */ + HCI_INIT(hci_read_page_scan_activity_sync), + /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */ + HCI_INIT(hci_read_def_err_data_reporting_sync), + /* HCI_OP_READ_PAGE_SCAN_TYPE */ + HCI_INIT(hci_read_page_scan_type_sync), + /* HCI_OP_READ_LOCAL_EXT_FEATURES */ + HCI_INIT(hci_read_local_ext_features_all_sync), + {} +}; + +static int hci_le_set_event_mask_sync(struct hci_dev *hdev) +{ + u8 events[8]; + + if (!lmp_le_capable(hdev)) + return 0; + + memset(events, 0, sizeof(events)); + + if (hdev->le_features[0] & HCI_LE_ENCRYPTION) + events[0] |= 0x10; /* LE Long Term Key Request */ + + /* If controller supports the Connection Parameters Request + * Link Layer Procedure, enable the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) + /* LE Remote Connection Parameter Request */ + events[0] |= 0x20; + + /* If the controller supports the Data Length Extension + * feature, enable the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) + events[0] |= 0x40; /* LE Data Length Change */ + + /* If the controller supports LL Privacy feature, enable + * the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_LL_PRIVACY) + events[1] |= 0x02; /* LE Enhanced Connection Complete */ + + /* If the controller supports Extended Scanner Filter + * Policies, enable the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) + events[1] |= 0x04; /* LE Direct Advertising Report */ + + /* If the controller supports Channel Selection Algorithm #2 + * feature, enable the corresponding event. + */ + if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) + events[2] |= 0x08; /* LE Channel Selection Algorithm */ + + /* If the controller supports the LE Set Scan Enable command, + * enable the corresponding advertising report event. + */ + if (hdev->commands[26] & 0x08) + events[0] |= 0x02; /* LE Advertising Report */ + + /* If the controller supports the LE Create Connection + * command, enable the corresponding event. + */ + if (hdev->commands[26] & 0x10) + events[0] |= 0x01; /* LE Connection Complete */ + + /* If the controller supports the LE Connection Update + * command, enable the corresponding event. + */ + if (hdev->commands[27] & 0x04) + events[0] |= 0x04; /* LE Connection Update Complete */ + + /* If the controller supports the LE Read Remote Used Features + * command, enable the corresponding event. 
+ */ + if (hdev->commands[27] & 0x20) + /* LE Read Remote Used Features Complete */ + events[0] |= 0x08; + + /* If the controller supports the LE Read Local P-256 + * Public Key command, enable the corresponding event. + */ + if (hdev->commands[34] & 0x02) + /* LE Read Local P-256 Public Key Complete */ + events[0] |= 0x80; + + /* If the controller supports the LE Generate DHKey + * command, enable the corresponding event. + */ + if (hdev->commands[34] & 0x04) + events[1] |= 0x01; /* LE Generate DHKey Complete */ + + /* If the controller supports the LE Set Default PHY or + * LE Set PHY commands, enable the corresponding event. + */ + if (hdev->commands[35] & (0x20 | 0x40)) + events[1] |= 0x08; /* LE PHY Update Complete */ + + /* If the controller supports LE Set Extended Scan Parameters + * and LE Set Extended Scan Enable commands, enable the + * corresponding event. + */ + if (use_ext_scan(hdev)) + events[1] |= 0x10; /* LE Extended Advertising Report */ + + /* If the controller supports the LE Extended Advertising + * command, enable the corresponding event. + */ + if (ext_adv_capable(hdev)) + events[2] |= 0x02; /* LE Advertising Set Terminated */ + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, + sizeof(events), events, HCI_CMD_TIMEOUT); +} + +/* Read LE Advertising Channel TX Power */ +static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) +{ + if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { + /* HCI TS spec forbids mixing of legacy and extended + * advertising commands wherein READ_ADV_TX_POWER is + * also included. So do not call it if extended adv + * is supported otherwise controller will return + * COMMAND_DISALLOWED for extended commands. + */ + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_ADV_TX_POWER, + 0, NULL, HCI_CMD_TIMEOUT); + } + + return 0; +} + +/* Read LE Min/Max Tx Power*/ +static int hci_le_read_tx_power_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[38] & 0x80)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read LE Accept List Size */ +static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[26] & 0x40)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Clear LE Accept List */ +static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[26] & 0x80)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read LE Resolving List Size */ +static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[34] & 0x40)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Clear LE Resolving List */ +static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[34] & 0x20)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Set RPA timeout */ +static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) +{ + __le16 timeout = cpu_to_le16(hdev->rpa_timeout); + + if (!(hdev->commands[35] & 0x04)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, + sizeof(timeout), &timeout, + HCI_CMD_TIMEOUT); +} + +/* Read LE Maximum Data Length */ +static int hci_le_read_max_data_len_sync(struct hci_dev *hdev) +{ + if (!(hdev->le_features[0] & 
HCI_LE_DATA_LEN_EXT)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read LE Suggested Default Data Length */ +static int hci_le_read_def_data_len_sync(struct hci_dev *hdev) +{ + if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read LE Number of Supported Advertising Sets */ +static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev) +{ + if (!ext_adv_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Write LE Host Supported */ +static int hci_set_le_support_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_le_host_supported cp; + + /* LE-only devices do not support explicit enablement */ + if (!lmp_bredr_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + cp.le = 0x01; + cp.simul = 0x00; + } + + if (cp.le == lmp_host_le_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +/* LE Controller init stage 3 command sequence */ +static const struct hci_init_stage le_init3[] = { + /* HCI_OP_LE_SET_EVENT_MASK */ + HCI_INIT(hci_le_set_event_mask_sync), + /* HCI_OP_LE_READ_ADV_TX_POWER */ + HCI_INIT(hci_le_read_adv_tx_power_sync), + /* HCI_OP_LE_READ_TRANSMIT_POWER */ + HCI_INIT(hci_le_read_tx_power_sync), + /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */ + HCI_INIT(hci_le_read_accept_list_size_sync), + /* HCI_OP_LE_CLEAR_ACCEPT_LIST */ + HCI_INIT(hci_le_clear_accept_list_sync), + /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */ + HCI_INIT(hci_le_read_resolv_list_size_sync), + /* HCI_OP_LE_CLEAR_RESOLV_LIST */ + HCI_INIT(hci_le_clear_resolv_list_sync), + /* HCI_OP_LE_SET_RPA_TIMEOUT */ + HCI_INIT(hci_le_set_rpa_timeout_sync), + /* HCI_OP_LE_READ_MAX_DATA_LEN */ + HCI_INIT(hci_le_read_max_data_len_sync), + /* HCI_OP_LE_READ_DEF_DATA_LEN */ + HCI_INIT(hci_le_read_def_data_len_sync), + /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */ + HCI_INIT(hci_le_read_num_support_adv_sets_sync), + /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ + HCI_INIT(hci_set_le_support_sync), + {} +}; + +static int hci_init3_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + err = hci_init_stage_sync(hdev, hci_init3); + if (err) + return err; + + if (lmp_le_capable(hdev)) + return hci_init_stage_sync(hdev, le_init3); + + return 0; +} + +static int hci_delete_stored_link_key_sync(struct hci_dev *hdev) +{ + struct hci_cp_delete_stored_link_key cp; + + /* Some Broadcom based Bluetooth controllers do not support the + * Delete Stored Link Key command. They are clearly indicating its + * absence in the bit mask of supported commands. + * + * Check the supported commands and only if the command is marked + * as supported send it. If not supported assume that the controller + * does not have actual support for stored link keys which makes this + * command redundant anyway. + * + * Some controllers indicate that they support handling deleting + * stored link keys, but they don't. The quirk lets a driver + * just disable this command. 
+ */ + if (!(hdev->commands[6] & 0x80) || + test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) + return 0; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, BDADDR_ANY); + cp.delete_all = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) +{ + u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + bool changed = false; + + /* Set event mask page 2 if the HCI command for it is supported */ + if (!(hdev->commands[22] & 0x04)) + return 0; + + /* If Connectionless Peripheral Broadcast central role is supported + * enable all necessary events for it. + */ + if (lmp_cpb_central_capable(hdev)) { + events[1] |= 0x40; /* Triggered Clock Capture */ + events[1] |= 0x80; /* Synchronization Train Complete */ + events[2] |= 0x10; /* Peripheral Page Response Timeout */ + events[2] |= 0x20; /* CPB Channel Map Change */ + changed = true; + } + + /* If Connectionless Peripheral Broadcast peripheral role is supported + * enable all necessary events for it. + */ + if (lmp_cpb_peripheral_capable(hdev)) { + events[2] |= 0x01; /* Synchronization Train Received */ + events[2] |= 0x02; /* CPB Receive */ + events[2] |= 0x04; /* CPB Timeout */ + events[2] |= 0x08; /* Truncated Page Complete */ + changed = true; + } + + /* Enable Authenticated Payload Timeout Expired event if supported */ + if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { + events[2] |= 0x80; + changed = true; + } + + /* Some Broadcom based controllers indicate support for Set Event + * Mask Page 2 command, but then actually do not support it. Since + * the default value is all bits set to zero, the command is only + * required if the event mask has to be changed. In case no change + * to the event mask is needed, skip this command. 
+ */ + if (!changed) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2, + sizeof(events), events, HCI_CMD_TIMEOUT); +} + +/* Read local codec list if the HCI command is supported */ +static int hci_read_local_codecs_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[29] & 0x20)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read local pairing options if the HCI command is supported */ +static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[41] & 0x08)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Get MWS transport configuration if the HCI command is supported */ +static int hci_get_mws_transport_config_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[30] & 0x08)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Check for Synchronization Train support */ +static int hci_read_sync_train_params_sync(struct hci_dev *hdev) +{ + if (!lmp_sync_train_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Enable Secure Connections if supported and configured */ +static int hci_write_sc_support_1_sync(struct hci_dev *hdev) +{ + u8 support = 0x01; + + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || + !bredr_sc_enabled(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, + sizeof(support), &support, + HCI_CMD_TIMEOUT); +} + +/* Set erroneous data reporting if supported to the wideband speech + * setting value + */ +static int hci_set_err_data_report_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_def_err_data_reporting cp; + bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); + + if (!(hdev->commands[18] & 0x08) || + test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + return 0; + + if (enabled == hdev->err_data_reporting) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.err_data_reporting = enabled ? 
ERR_DATA_REPORTING_ENABLED : + ERR_DATA_REPORTING_DISABLED; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static const struct hci_init_stage hci_init4[] = { + /* HCI_OP_DELETE_STORED_LINK_KEY */ + HCI_INIT(hci_delete_stored_link_key_sync), + /* HCI_OP_SET_EVENT_MASK_PAGE_2 */ + HCI_INIT(hci_set_event_mask_page_2_sync), + /* HCI_OP_READ_LOCAL_CODECS */ + HCI_INIT(hci_read_local_codecs_sync), + /* HCI_OP_READ_LOCAL_PAIRING_OPTS */ + HCI_INIT(hci_read_local_pairing_opts_sync), + /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */ + HCI_INIT(hci_get_mws_transport_config_sync), + /* HCI_OP_READ_SYNC_TRAIN_PARAMS */ + HCI_INIT(hci_read_sync_train_params_sync), + /* HCI_OP_WRITE_SC_SUPPORT */ + HCI_INIT(hci_write_sc_support_1_sync), + /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */ + HCI_INIT(hci_set_err_data_report_sync), + {} +}; + +/* Set Suggested Default Data Length to maximum if supported */ +static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev) +{ + struct hci_cp_le_write_def_data_len cp; + + if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); + cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +/* Set Default PHY parameters if command is supported */ +static int hci_le_set_default_phy_sync(struct hci_dev *hdev) +{ + struct hci_cp_le_set_default_phy cp; + + if (!(hdev->commands[35] & 0x20)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.all_phys = 0x00; + cp.tx_phys = hdev->le_tx_def_phys; + cp.rx_phys = hdev->le_rx_def_phys; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static const struct hci_init_stage le_init4[] = { + /* HCI_OP_LE_WRITE_DEF_DATA_LEN */ + HCI_INIT(hci_le_set_write_def_data_len_sync), + /* HCI_OP_LE_SET_DEFAULT_PHY */ + HCI_INIT(hci_le_set_default_phy_sync), + {} +}; + +static int hci_init4_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + err = hci_init_stage_sync(hdev, hci_init4); + if (err) + return err; + + if (lmp_le_capable(hdev)) + return hci_init_stage_sync(hdev, le_init4); + + return 0; +} + +static int hci_init_sync(struct hci_dev *hdev) +{ + int err; + + err = hci_init1_sync(hdev); + if (err < 0) + return err; + + if (hci_dev_test_flag(hdev, HCI_SETUP)) + hci_debugfs_create_basic(hdev); + + err = hci_init2_sync(hdev); + if (err < 0) + return err; + + /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode + * BR/EDR/LE type controllers. AMP controllers only need the + * first two stages of init. + */ + if (hdev->dev_type != HCI_PRIMARY) + return 0; + + err = hci_init3_sync(hdev); + if (err < 0) + return err; + + err = hci_init4_sync(hdev); + if (err < 0) + return err; + + /* This function is only called when the controller is actually in + * configured state. When the controller is marked as unconfigured, + * this initialization procedure is not run. + * + * It means that it is possible that a controller runs through its + * setup phase and then discovers missing settings. If that is the + * case, then this function will not be called. It then will only + * be called during the config phase. + * + * So only when in setup phase or config phase, create the debugfs + * entries and register the SMP channels. 
+ */ + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) + return 0; + + hci_debugfs_create_common(hdev); + + if (lmp_bredr_capable(hdev)) + hci_debugfs_create_bredr(hdev); + + if (lmp_le_capable(hdev)) + hci_debugfs_create_le(hdev); + + return 0; +} + +int hci_dev_open_sync(struct hci_dev *hdev) +{ + int ret = 0; + + bt_dev_dbg(hdev, ""); + + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + ret = -ENODEV; + goto done; + } + + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { + /* Check for rfkill but allow the HCI setup stage to + * proceed (which in itself doesn't cause any RF activity). + */ + if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { + ret = -ERFKILL; + goto done; + } + + /* Check for valid public address or a configured static + * random address, but let the HCI setup proceed to + * be able to determine if there is a public address + * or not. + * + * In case of user channel usage, it is not important + * if a public address or static random address is + * available. + * + * This check is only valid for BR/EDR controllers + * since AMP controllers do not have an address. + */ + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hdev->dev_type == HCI_PRIMARY && + !bacmp(&hdev->bdaddr, BDADDR_ANY) && + !bacmp(&hdev->static_addr, BDADDR_ANY)) { + ret = -EADDRNOTAVAIL; + goto done; + } + } + + if (test_bit(HCI_UP, &hdev->flags)) { + ret = -EALREADY; + goto done; + } + + if (hdev->open(hdev)) { + ret = -EIO; + goto done; + } + + set_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_OPEN); + + atomic_set(&hdev->cmd_cnt, 1); + set_bit(HCI_INIT, &hdev->flags); + + if (hci_dev_test_flag(hdev, HCI_SETUP) || + test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { + bool invalid_bdaddr; + + hci_sock_dev_event(hdev, HCI_DEV_SETUP); + + if (hdev->setup) + ret = hdev->setup(hdev); + + /* The transport driver can set the quirk to mark the + * BD_ADDR invalid before creating the HCI device or in + * its setup callback. + */ + invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, + &hdev->quirks); + + if (ret) + goto setup_failed; + + if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) { + if (!bacmp(&hdev->public_addr, BDADDR_ANY)) + hci_dev_get_bd_addr_from_property(hdev); + + if (bacmp(&hdev->public_addr, BDADDR_ANY) && + hdev->set_bdaddr) { + ret = hdev->set_bdaddr(hdev, + &hdev->public_addr); + + /* If setting of the BD_ADDR from the device + * property succeeds, then treat the address + * as valid even if the invalid BD_ADDR + * quirk indicates otherwise. + */ + if (!ret) + invalid_bdaddr = false; + } + } + +setup_failed: + /* The transport driver can set these quirks before + * creating the HCI device or in its setup callback. + * + * For the invalid BD_ADDR quirk it is possible that + * it becomes a valid address if the bootloader does + * provide it (see above). + * + * In case any of them is set, the controller has to + * start up as unconfigured. + */ + if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || + invalid_bdaddr) + hci_dev_set_flag(hdev, HCI_UNCONFIGURED); + + /* For an unconfigured controller it is required to + * read at least the version information provided by + * the Read Local Version Information command. + * + * If the set_bdaddr driver callback is provided, then + * also the original Bluetooth public device address + * will be read using the Read BD Address command. 
+ */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + ret = hci_unconf_init_sync(hdev); + } + + if (hci_dev_test_flag(hdev, HCI_CONFIG)) { + /* If public address change is configured, ensure that + * the address gets programmed. If the driver does not + * support changing the public address, fail the power + * on procedure. + */ + if (bacmp(&hdev->public_addr, BDADDR_ANY) && + hdev->set_bdaddr) + ret = hdev->set_bdaddr(hdev, &hdev->public_addr); + else + ret = -EADDRNOTAVAIL; + } + + if (!ret) { + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + ret = hci_init_sync(hdev); + if (!ret && hdev->post_init) + ret = hdev->post_init(hdev); + } + } + + /* If the HCI Reset command is clearing all diagnostic settings, + * then they need to be reprogrammed after the init procedure + * completed. + */ + if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) + ret = hdev->set_diag(hdev, true); + + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + msft_do_open(hdev); + aosp_do_open(hdev); + } + + clear_bit(HCI_INIT, &hdev->flags); + + if (!ret) { + hci_dev_hold(hdev); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + hci_adv_instances_set_rpa_expired(hdev, true); + set_bit(HCI_UP, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_UP); + hci_leds_update_powered(hdev, true); + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG) && + !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_dev_test_flag(hdev, HCI_MGMT) && + hdev->dev_type == HCI_PRIMARY) { + ret = hci_powered_update_sync(hdev); + } + } else { + /* Init failed, cleanup */ + flush_work(&hdev->tx_work); + + /* Since hci_rx_work() is possible to awake new cmd_work + * it should be flushed first to avoid unexpected call of + * hci_cmd_work() + */ + flush_work(&hdev->rx_work); + flush_work(&hdev->cmd_work); + + skb_queue_purge(&hdev->cmd_q); + skb_queue_purge(&hdev->rx_q); + + if (hdev->flush) + hdev->flush(hdev); + + if (hdev->sent_cmd) { + kfree_skb(hdev->sent_cmd); + hdev->sent_cmd = NULL; + } + + clear_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_CLOSE); + + hdev->close(hdev); + hdev->flags &= BIT(HCI_RAW); + } + +done: + return ret; +} + +/* This function requires the caller holds hdev->lock */ +static void hci_pend_le_actions_clear(struct hci_dev *hdev) +{ + struct hci_conn_params *p; + + list_for_each_entry(p, &hdev->le_conn_params, list) { + if (p->conn) { + hci_conn_drop(p->conn); + hci_conn_put(p->conn); + p->conn = NULL; + } + list_del_init(&p->action); + } + + BT_DBG("All LE pending actions cleared"); +} + +int hci_dev_close_sync(struct hci_dev *hdev) +{ + bool auto_off; + int err = 0; + + bt_dev_dbg(hdev, ""); + + cancel_delayed_work(&hdev->power_off); + cancel_delayed_work(&hdev->ncmd_timer); + + hci_request_cancel_all(hdev); + + if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + test_bit(HCI_UP, &hdev->flags)) { + /* Execute vendor specific shutdown routine */ + if (hdev->shutdown) + err = hdev->shutdown(hdev); + } + + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { + cancel_delayed_work_sync(&hdev->cmd_timer); + return err; + } + + hci_leds_update_powered(hdev, false); + + /* Flush RX and TX works */ + flush_work(&hdev->tx_work); + flush_work(&hdev->rx_work); + + if (hdev->discov_timeout > 0) { + hdev->discov_timeout = 0; + 
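Note on the close path above: hci_dev_close_sync() guards its teardown with test_and_clear_bit(HCI_UP, &hdev->flags), so a second close finds the bit already clear and bails out early instead of flushing and powering down twice. A minimal user-space sketch of that idempotent-close pattern, using C11 atomics in place of the kernel bit helpers (struct and names invented for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Invented device state; the atomic exchange stands in for
     * test_and_clear_bit(HCI_UP, &hdev->flags).
     */
    struct dev_state {
            atomic_bool up;
    };

    static int dev_close(struct dev_state *d)
    {
            if (!atomic_exchange(&d->up, false))
                    return 0;       /* already down: skip the teardown */

            puts("flushing queues and powering down");
            return 0;
    }

    int main(void)
    {
            struct dev_state d;

            atomic_init(&d.up, true);
            dev_close(&d);          /* runs the teardown once */
            dev_close(&d);          /* second call is a no-op */
            return 0;
    }

Only the caller that actually clears the flag performs the shutdown work, which is what keeps the repeated hci_dev_close_sync() calls in the power-off and error paths safe.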
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + } + + if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) + cancel_delayed_work(&hdev->service_cache); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) { + struct adv_info *adv_instance; + + cancel_delayed_work_sync(&hdev->rpa_expired); + + list_for_each_entry(adv_instance, &hdev->adv_instances, list) + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); + } + + /* Avoid potential lockdep warnings from the *_flush() calls by + * ensuring the workqueue is empty up front. + */ + drain_workqueue(hdev->workqueue); + + hci_dev_lock(hdev); + + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + + auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); + + if (!auto_off && hdev->dev_type == HCI_PRIMARY && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_dev_test_flag(hdev, HCI_MGMT)) + __mgmt_power_off(hdev); + + hci_inquiry_cache_flush(hdev); + hci_pend_le_actions_clear(hdev); + hci_conn_hash_flush(hdev); + hci_dev_unlock(hdev); + + smp_unregister(hdev); + + hci_sock_dev_event(hdev, HCI_DEV_DOWN); + + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + aosp_do_close(hdev); + msft_do_close(hdev); + } + + if (hdev->flush) + hdev->flush(hdev); + + /* Reset device */ + skb_queue_purge(&hdev->cmd_q); + atomic_set(&hdev->cmd_cnt, 1); + if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && + !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + set_bit(HCI_INIT, &hdev->flags); + hci_reset_sync(hdev); + clear_bit(HCI_INIT, &hdev->flags); + } + + /* flush cmd work */ + flush_work(&hdev->cmd_work); + + /* Drop queues */ + skb_queue_purge(&hdev->rx_q); + skb_queue_purge(&hdev->cmd_q); + skb_queue_purge(&hdev->raw_q); + + /* Drop last sent command */ + if (hdev->sent_cmd) { + cancel_delayed_work_sync(&hdev->cmd_timer); + kfree_skb(hdev->sent_cmd); + hdev->sent_cmd = NULL; + } + + clear_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_CLOSE); + + /* After this point our queues are empty and no tasks are scheduled. */ + hdev->close(hdev); + + /* Clear flags */ + hdev->flags &= BIT(HCI_RAW); + hci_dev_clear_volatile_flags(hdev); + + /* Controller radio is available but is currently powered down */ + hdev->amp_status = AMP_STATUS_POWERED_DOWN; + + memset(hdev->eir, 0, sizeof(hdev->eir)); + memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); + bacpy(&hdev->random_addr, BDADDR_ANY); + + hci_dev_put(hdev); + return err; +} + +/* This function perform power on HCI command sequence as follows: + * + * If controller is already up (HCI_UP) performs hci_powered_update_sync + * sequence otherwise run hci_dev_open_sync which will follow with + * hci_powered_update_sync after the init sequence is completed. + */ +static int hci_power_on_sync(struct hci_dev *hdev) +{ + int err; + + if (test_bit(HCI_UP, &hdev->flags) && + hci_dev_test_flag(hdev, HCI_MGMT) && + hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { + cancel_delayed_work(&hdev->power_off); + return hci_powered_update_sync(hdev); + } + + err = hci_dev_open_sync(hdev); + if (err < 0) + return err; + + /* During the HCI setup phase, a few error conditions are + * ignored and they need to be checked now. If they are still + * valid, it is important to return the device back off. 
+ */ + if (hci_dev_test_flag(hdev, HCI_RFKILLED) || + hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || + (hdev->dev_type == HCI_PRIMARY && + !bacmp(&hdev->bdaddr, BDADDR_ANY) && + !bacmp(&hdev->static_addr, BDADDR_ANY))) { + hci_dev_clear_flag(hdev, HCI_AUTO_OFF); + hci_dev_close_sync(hdev); + } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { + queue_delayed_work(hdev->req_workqueue, &hdev->power_off, + HCI_AUTO_OFF_TIMEOUT); + } + + if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { + /* For unconfigured devices, set the HCI_RAW flag + * so that userspace can easily identify them. + */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + set_bit(HCI_RAW, &hdev->flags); + + /* For fully configured devices, this will send + * the Index Added event. For unconfigured devices, + * it will send Unconfigued Index Added event. + * + * Devices with HCI_QUIRK_RAW_DEVICE are ignored + * and no event will be send. + */ + mgmt_index_added(hdev); + } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { + /* When the controller is now configured, then it + * is important to clear the HCI_RAW flag. + */ + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + clear_bit(HCI_RAW, &hdev->flags); + + /* Powering on the controller with HCI_CONFIG set only + * happens with the transition from unconfigured to + * configured. This will send the Index Added event. + */ + mgmt_index_added(hdev); + } + + return 0; +} + +static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr) +{ + struct hci_cp_remote_name_req_cancel cp; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, addr); + + return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_stop_discovery_sync(struct hci_dev *hdev) +{ + struct discovery_state *d = &hdev->discovery; + struct inquiry_entry *e; + int err; + + bt_dev_dbg(hdev, "state %u", hdev->discovery.state); + + if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { + if (test_bit(HCI_INQUIRY, &hdev->flags)) { + err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, + 0, NULL, HCI_CMD_TIMEOUT); + if (err) + return err; + } + + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { + cancel_delayed_work(&hdev->le_scan_disable); + cancel_delayed_work(&hdev->le_scan_restart); + + err = hci_scan_disable_sync(hdev); + if (err) + return err; + } + + } else { + err = hci_scan_disable_sync(hdev); + if (err) + return err; + } + + /* Resume advertising if it was paused */ + if (use_ll_privacy(hdev)) + hci_resume_advertising_sync(hdev); + + /* No further actions needed for LE-only discovery */ + if (d->type == DISCOV_TYPE_LE) + return 0; + + if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { + e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, + NAME_PENDING); + if (!e) + return 0; + + return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); + } + + return 0; +} + +static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle, + u8 reason) +{ + struct hci_cp_disconn_phy_link cp; + + memset(&cp, 0, sizeof(cp)); + cp.phy_handle = HCI_PHY_HANDLE(handle); + cp.reason = reason; + + return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + struct hci_cp_disconnect cp; + + if (conn->type == AMP_LINK) + return hci_disconnect_phy_link_sync(hdev, conn->handle, reason); + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.reason = reason; + 
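The connection handle assembled just above goes through cpu_to_le16() because HCI command parameters are transmitted least-significant byte first regardless of host byte order. As a rough illustration, the user-space sketch below (hand-rolled put_le16() helper and a made-up handle value, not kernel code) shows the resulting byte layout of the 3-octet Disconnect parameter block:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for cpu_to_le16()/put_unaligned_le16(): low byte first. */
    static void put_le16(uint8_t *dst, uint16_t val)
    {
            dst[0] = val & 0xff;
            dst[1] = (val >> 8) & 0xff;
    }

    /* Same layout as the Disconnect parameters: handle (LE16) + reason. */
    struct disconnect_params {
            uint8_t handle[2];
            uint8_t reason;
    };

    int main(void)
    {
            struct disconnect_params cp;
            uint16_t handle = 0x0abc;       /* made-up connection handle */

            memset(&cp, 0, sizeof(cp));
            put_le16(cp.handle, handle);
            cp.reason = 0x13;               /* Remote User Terminated Connection */

            /* Prints "bc 0a 13" on any host, big- or little-endian. */
            printf("%02x %02x %02x\n", cp.handle[0], cp.handle[1], cp.reason);
            return 0;
    }

On little-endian hosts cpu_to_le16() compiles to nothing; the helper above only makes the on-the-wire byte order explicit.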
+ /* Wait for HCI_EV_DISCONN_COMPLETE not HCI_EV_CMD_STATUS when not + * suspending. + */ + if (!hdev->suspended) + return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT, + sizeof(cp), &cp, + HCI_EV_DISCONN_COMPLETE, + HCI_CMD_TIMEOUT, NULL); + + return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +static int hci_le_connect_cancel_sync(struct hci_dev *hdev, + struct hci_conn *conn) +{ + if (test_bit(HCI_CONN_SCANNING, &conn->flags)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, + 6, &conn->dst, HCI_CMD_TIMEOUT); +} + +static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + if (conn->type == LE_LINK) + return hci_le_connect_cancel_sync(hdev, conn); + + if (hdev->hci_ver < BLUETOOTH_VER_1_2) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL, + 6, &conn->dst, HCI_CMD_TIMEOUT); +} + +static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + struct hci_cp_reject_sync_conn_req cp; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.reason = reason; + + /* SCO rejection has its own limited set of + * allowed error values (0x0D-0x0F). + */ + if (reason < 0x0d || reason > 0x0f) + cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; + + return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + struct hci_cp_reject_conn_req cp; + + if (conn->type == SCO_LINK || conn->type == ESCO_LINK) + return hci_reject_sco_sync(hdev, conn, reason); + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.reason = reason; + + return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + switch (conn->state) { + case BT_CONNECTED: + case BT_CONFIG: + return hci_disconnect_sync(hdev, conn, reason); + case BT_CONNECT: + return hci_connect_cancel_sync(hdev, conn); + case BT_CONNECT2: + return hci_reject_conn_sync(hdev, conn, reason); + default: + conn->state = BT_CLOSED; + break; + } + + return 0; +} + +static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason) +{ + struct hci_conn *conn, *tmp; + int err; + + list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { + err = hci_abort_conn_sync(hdev, conn, reason); + if (err) + return err; + } + + return err; +} + +/* This function perform power off HCI command sequence as follows: + * + * Clear Advertising + * Stop Discovery + * Disconnect all connections + * hci_dev_close_sync + */ +static int hci_power_off_sync(struct hci_dev *hdev) +{ + int err; + + /* If controller is already down there is nothing to do */ + if (!test_bit(HCI_UP, &hdev->flags)) + return 0; + + if (test_bit(HCI_ISCAN, &hdev->flags) || + test_bit(HCI_PSCAN, &hdev->flags)) { + err = hci_write_scan_enable_sync(hdev, 0x00); + if (err) + return err; + } + + err = hci_clear_adv_sync(hdev, NULL, false); + if (err) + return err; + + err = hci_stop_discovery_sync(hdev); + if (err) + return err; + + /* Terminated due to Power Off */ + err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); + if (err) + return err; + + return hci_dev_close_sync(hdev); +} + +int hci_set_powered_sync(struct hci_dev *hdev, u8 val) +{ + if (val) + return hci_power_on_sync(hdev); + + return hci_power_off_sync(hdev); +} + +static int 
hci_write_iac_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_current_iac_lap cp; + + if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { + /* Limited discoverable mode */ + cp.num_iac = min_t(u8, hdev->num_iac, 2); + cp.iac_lap[0] = 0x00; /* LIAC */ + cp.iac_lap[1] = 0x8b; + cp.iac_lap[2] = 0x9e; + cp.iac_lap[3] = 0x33; /* GIAC */ + cp.iac_lap[4] = 0x8b; + cp.iac_lap[5] = 0x9e; + } else { + /* General discoverable mode */ + cp.num_iac = 1; + cp.iac_lap[0] = 0x33; /* GIAC */ + cp.iac_lap[1] = 0x8b; + cp.iac_lap[2] = 0x9e; + } + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP, + (cp.num_iac * 3) + 1, &cp, + HCI_CMD_TIMEOUT); +} + +int hci_update_discoverable_sync(struct hci_dev *hdev) +{ + int err = 0; + + if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { + err = hci_write_iac_sync(hdev); + if (err) + return err; + + err = hci_update_scan_sync(hdev); + if (err) + return err; + + err = hci_update_class_sync(hdev); + if (err) + return err; + } + + /* Advertising instances don't use the global discoverable setting, so + * only update AD if advertising was enabled using Set Advertising. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { + err = hci_update_adv_data_sync(hdev, 0x00); + if (err) + return err; + + /* Discoverable mode affects the local advertising + * address in limited privacy mode. + */ + if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { + if (ext_adv_capable(hdev)) + err = hci_start_ext_adv_sync(hdev, 0x00); + else + err = hci_enable_advertising_sync(hdev); + } + } + + return err; +} + +static int update_discoverable_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_discoverable_sync(hdev); +} + +int hci_update_discoverable(struct hci_dev *hdev) +{ + /* Only queue if it would have any effect */ + if (hdev_is_powered(hdev) && + hci_dev_test_flag(hdev, HCI_ADVERTISING) && + hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && + hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) + return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL, + NULL); + + return 0; +} + +int hci_update_connectable_sync(struct hci_dev *hdev) +{ + int err; + + err = hci_update_scan_sync(hdev); + if (err) + return err; + + /* If BR/EDR is not enabled and we disable advertising as a + * by-product of disabling connectable, we need to update the + * advertising flags. 
+ */ + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); + + /* Update the advertising parameters if necessary */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || + !list_empty(&hdev->adv_instances)) { + if (ext_adv_capable(hdev)) + err = hci_start_ext_adv_sync(hdev, + hdev->cur_adv_instance); + else + err = hci_enable_advertising_sync(hdev); + + if (err) + return err; + } + + return hci_update_passive_scan_sync(hdev); +} + +static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) +{ + const u8 giac[3] = { 0x33, 0x8b, 0x9e }; + const u8 liac[3] = { 0x00, 0x8b, 0x9e }; + struct hci_cp_inquiry cp; + + bt_dev_dbg(hdev, ""); + + if (hci_dev_test_flag(hdev, HCI_INQUIRY)) + return 0; + + hci_dev_lock(hdev); + hci_inquiry_cache_flush(hdev); + hci_dev_unlock(hdev); + + memset(&cp, 0, sizeof(cp)); + + if (hdev->discovery.limited) + memcpy(&cp.lap, liac, sizeof(cp.lap)); + else + memcpy(&cp.lap, giac, sizeof(cp.lap)); + + cp.length = length; + + return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval) +{ + u8 own_addr_type; + /* Accept list is not used for discovery */ + u8 filter_policy = 0x00; + /* Default is to enable duplicates filter */ + u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; + int err; + + bt_dev_dbg(hdev, ""); + + /* If controller is scanning, it means the passive scanning is + * running. Thus, we should temporarily stop it in order to set the + * discovery scanning parameters. + */ + err = hci_scan_disable_sync(hdev); + if (err) { + bt_dev_err(hdev, "Unable to disable scanning: %d", err); + return err; + } + + cancel_interleave_scan(hdev); + + /* Pause advertising since active scanning disables address resolution + * which advertising depend on in order to generate its RPAs. + */ + if (use_ll_privacy(hdev)) { + err = hci_pause_advertising_sync(hdev); + if (err) { + bt_dev_err(hdev, "pause advertising failed: %d", err); + goto failed; + } + } + + /* Disable address resolution while doing active scanning since the + * accept list shall not be used and all reports shall reach the host + * anyway. + */ + err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); + if (err) { + bt_dev_err(hdev, "Unable to disable Address Resolution: %d", + err); + goto failed; + } + + /* All active scans will be done with either a resolvable private + * address (when privacy feature has been enabled) or non-resolvable + * private address. + */ + err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev), + &own_addr_type); + if (err < 0) + own_addr_type = ADDR_LE_DEV_PUBLIC; + + if (hci_is_adv_monitoring(hdev)) { + /* Duplicate filter should be disabled when some advertisement + * monitor is activated, otherwise AdvMon can only receive one + * advertisement for one peer(*) during active scanning, and + * might report loss to these peers. + * + * Note that different controllers have different meanings of + * |duplicate|. Some of them consider packets with the same + * address as duplicate, and others consider packets with the + * same address and the same RSSI as duplicate. Although in the + * latter case we don't need to disable duplicate filter, but + * it is common to have active scanning for a short period of + * time, the power impact should be neglectable. 
+ */ + filter_dup = LE_SCAN_FILTER_DUP_DISABLE; + } + + err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval, + hdev->le_scan_window_discovery, + own_addr_type, filter_policy, filter_dup); + if (!err) + return err; + +failed: + /* Resume advertising if it was paused */ + if (use_ll_privacy(hdev)) + hci_resume_advertising_sync(hdev); + + /* Resume passive scanning */ + hci_update_passive_scan_sync(hdev); + return err; +} + +static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2); + if (err) + return err; + + return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); +} + +int hci_start_discovery_sync(struct hci_dev *hdev) +{ + unsigned long timeout; + int err; + + bt_dev_dbg(hdev, "type %u", hdev->discovery.type); + + switch (hdev->discovery.type) { + case DISCOV_TYPE_BREDR: + return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); + case DISCOV_TYPE_INTERLEAVED: + /* When running simultaneous discovery, the LE scanning time + * should occupy the whole discovery time sine BR/EDR inquiry + * and LE scanning are scheduled by the controller. + * + * For interleaving discovery in comparison, BR/EDR inquiry + * and LE scanning are done sequentially with separate + * timeouts. + */ + if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, + &hdev->quirks)) { + timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); + /* During simultaneous discovery, we double LE scan + * interval. We must leave some time for the controller + * to do BR/EDR inquiry. + */ + err = hci_start_interleaved_discovery_sync(hdev); + break; + } + + timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); + err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); + break; + case DISCOV_TYPE_LE: + timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); + err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); + break; + default: + return -EINVAL; + } + + if (err) + return err; + + bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); + + /* When service discovery is used and the controller has a + * strict duplicate filter, it is important to remember the + * start and duration of the scan. This is required for + * restarting scanning during the discovery phase. 
+ */ + if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && + hdev->discovery.result_filtering) { + hdev->discovery.scan_start = jiffies; + hdev->discovery.scan_duration = timeout; + } + + queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, + timeout); + return 0; +} + +static void hci_suspend_monitor_sync(struct hci_dev *hdev) +{ + switch (hci_get_adv_monitor_offload_ext(hdev)) { + case HCI_ADV_MONITOR_EXT_MSFT: + msft_suspend_sync(hdev); + break; + default: + return; + } +} + +/* This function disables discovery and mark it as paused */ +static int hci_pause_discovery_sync(struct hci_dev *hdev) +{ + int old_state = hdev->discovery.state; + int err; + + /* If discovery already stopped/stopping/paused there nothing to do */ + if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING || + hdev->discovery_paused) + return 0; + + hci_discovery_set_state(hdev, DISCOVERY_STOPPING); + err = hci_stop_discovery_sync(hdev); + if (err) + return err; + + hdev->discovery_paused = true; + hdev->discovery_old_state = old_state; + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + + return 0; +} + +static int hci_update_event_filter_sync(struct hci_dev *hdev) +{ + struct bdaddr_list_with_flags *b; + u8 scan = SCAN_DISABLED; + bool scanning = test_bit(HCI_PSCAN, &hdev->flags); + int err; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + /* Always clear event filter when starting */ + hci_clear_event_filter_sync(hdev); + + list_for_each_entry(b, &hdev->accept_list, list) { + if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, + b->current_flags)) + continue; + + bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); + + err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP, + HCI_CONN_SETUP_ALLOW_BDADDR, + &b->bdaddr, + HCI_CONN_SETUP_AUTO_ON); + if (err) + bt_dev_dbg(hdev, "Failed to set event filter for %pMR", + &b->bdaddr); + else + scan = SCAN_PAGE; + } + + if (scan && !scanning) + hci_write_scan_enable_sync(hdev, scan); + else if (!scan && scanning) + hci_write_scan_enable_sync(hdev, scan); + + return 0; +} + +/* This function performs the HCI suspend procedures in the follow order: + * + * Pause discovery (active scanning/inquiry) + * Pause Directed Advertising/Advertising + * Disconnect all connections + * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup + * otherwise: + * Update event mask (only set events that are allowed to wake up the host) + * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP) + * Update passive scanning (lower duty cycle) + * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE + */ +int hci_suspend_sync(struct hci_dev *hdev) +{ + int err; + + /* If marked as suspended there nothing to do */ + if (hdev->suspended) + return 0; + + /* Mark device as suspended */ + hdev->suspended = true; + + /* Pause discovery if not already stopped */ + hci_pause_discovery_sync(hdev); + + /* Pause other advertisements */ + hci_pause_advertising_sync(hdev); + + /* Disable page scan if enabled */ + if (test_bit(HCI_PSCAN, &hdev->flags)) + hci_write_scan_enable_sync(hdev, SCAN_DISABLED); + + /* Suspend monitor filters */ + hci_suspend_monitor_sync(hdev); + + /* Prevent disconnects from causing scanning to be re-enabled */ + hdev->scanning_paused = true; + + /* Soft disconnect everything (power off) */ + err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); + if (err) { + /* Set state to BT_RUNNING so resume doesn't notify */ + hdev->suspend_state = BT_RUNNING; + 
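hci_pause_discovery_sync() above records the interrupted state in discovery_old_state and sets discovery_paused, and the resume counterpart further down only restarts scanning when that flag is set. A rough user-space sketch of the same pause/resume bookkeeping (types and names invented for illustration, simplified from the kernel logic):

    #include <stdbool.h>
    #include <stdio.h>

    enum disc_state { DISC_STOPPED, DISC_STOPPING, DISC_FINDING };

    /* Invented container for the paused/old_state bookkeeping. */
    struct discovery {
            enum disc_state state;
            enum disc_state old_state;
            bool paused;
    };

    static void pause_discovery(struct discovery *d)
    {
            if (d->paused || d->state == DISC_STOPPED || d->state == DISC_STOPPING)
                    return;                 /* nothing running or already paused */

            d->old_state = d->state;        /* remember what was interrupted */
            d->state = DISC_STOPPED;
            d->paused = true;
    }

    static void resume_discovery(struct discovery *d)
    {
            if (!d->paused)
                    return;                 /* never paused: leave it alone */

            d->paused = false;
            d->state = DISC_FINDING;        /* restart the interrupted scan */
    }

    int main(void)
    {
            struct discovery d = { .state = DISC_FINDING };

            pause_discovery(&d);
            printf("paused: state=%d old=%d\n", d.state, d.old_state);
            resume_discovery(&d);
            printf("resumed: state=%d\n", d.state);
            return 0;
    }

The point of keeping the old state around is that suspend can be entered at any moment; resume then knows whether there was an active scan worth restarting.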
hci_resume_sync(hdev); + return err; + } + + /* Only configure accept list if disconnect succeeded and wake + * isn't being prevented. + */ + if (!hdev->wakeup || !hdev->wakeup(hdev)) { + hdev->suspend_state = BT_SUSPEND_DISCONNECT; + return 0; + } + + /* Unpause to take care of updating scanning params */ + hdev->scanning_paused = false; + + /* Update event mask so only the allowed event can wakeup the host */ + hci_set_event_mask_sync(hdev); + + /* Enable event filter for paired devices */ + hci_update_event_filter_sync(hdev); + + /* Update LE passive scan if enabled */ + hci_update_passive_scan_sync(hdev); + + /* Pause scan changes again. */ + hdev->scanning_paused = true; + + hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE; + + return 0; +} + +/* This function resumes discovery */ +static int hci_resume_discovery_sync(struct hci_dev *hdev) +{ + int err; + + /* If discovery not paused there nothing to do */ + if (!hdev->discovery_paused) + return 0; + + hdev->discovery_paused = false; + + hci_discovery_set_state(hdev, DISCOVERY_STARTING); + + err = hci_start_discovery_sync(hdev); + + hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED : + DISCOVERY_FINDING); + + return err; +} + +static void hci_resume_monitor_sync(struct hci_dev *hdev) +{ + switch (hci_get_adv_monitor_offload_ext(hdev)) { + case HCI_ADV_MONITOR_EXT_MSFT: + msft_resume_sync(hdev); + break; + default: + return; + } +} + +/* This function performs the HCI suspend procedures in the follow order: + * + * Restore event mask + * Clear event filter + * Update passive scanning (normal duty cycle) + * Resume Directed Advertising/Advertising + * Resume discovery (active scanning/inquiry) + */ +int hci_resume_sync(struct hci_dev *hdev) +{ + /* If not marked as suspended there nothing to do */ + if (!hdev->suspended) + return 0; + + hdev->suspended = false; + hdev->scanning_paused = false; + + /* Restore event mask */ + hci_set_event_mask_sync(hdev); + + /* Clear any event filters and restore scan state */ + hci_clear_event_filter_sync(hdev); + hci_update_scan_sync(hdev); + + /* Reset passive scanning to normal */ + hci_update_passive_scan_sync(hdev); + + /* Resume monitor filters */ + hci_resume_monitor_sync(hdev); + + /* Resume other advertisements */ + hci_resume_advertising_sync(hdev); + + /* Resume discovery */ + hci_resume_discovery_sync(hdev); + + return 0; +} diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 7827639ecf5c..4e3e0451b08c 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -86,6 +86,8 @@ static void bt_host_release(struct device *dev) if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) hci_release_dev(hdev); + else + kfree(hdev); module_put(THIS_MODULE); } diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 160c016a5dfb..4574c5cb1b59 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -172,6 +172,21 @@ done: return err; } +static void l2cap_sock_init_pid(struct sock *sk) +{ + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + + /* Only L2CAP_MODE_EXT_FLOWCTL ever need to access the PID in order to + * group the channels being requested. 
+ */ + if (chan->mode != L2CAP_MODE_EXT_FLOWCTL) + return; + + spin_lock(&sk->sk_peer_lock); + sk->sk_peer_pid = get_pid(task_tgid(current)); + spin_unlock(&sk->sk_peer_lock); +} + static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { @@ -243,6 +258,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode) chan->mode = L2CAP_MODE_LE_FLOWCTL; + l2cap_sock_init_pid(sk); + err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), &la.l2_bdaddr, la.l2_bdaddr_type); if (err) @@ -298,6 +315,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) goto done; } + l2cap_sock_init_pid(sk); + sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 3e5283607b97..f8f74d344297 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -39,6 +39,7 @@ #include "mgmt_config.h" #include "msft.h" #include "eir.h" +#include "aosp.h" #define MGMT_VERSION 1 #define MGMT_REVISION 21 @@ -276,10 +277,39 @@ static const u8 mgmt_status_table[] = { MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */ }; -static u8 mgmt_status(u8 hci_status) +static u8 mgmt_errno_status(int err) { - if (hci_status < ARRAY_SIZE(mgmt_status_table)) - return mgmt_status_table[hci_status]; + switch (err) { + case 0: + return MGMT_STATUS_SUCCESS; + case -EPERM: + return MGMT_STATUS_REJECTED; + case -EINVAL: + return MGMT_STATUS_INVALID_PARAMS; + case -EOPNOTSUPP: + return MGMT_STATUS_NOT_SUPPORTED; + case -EBUSY: + return MGMT_STATUS_BUSY; + case -ETIMEDOUT: + return MGMT_STATUS_AUTH_FAILED; + case -ENOMEM: + return MGMT_STATUS_NO_RESOURCES; + case -EISCONN: + return MGMT_STATUS_ALREADY_CONNECTED; + case -ENOTCONN: + return MGMT_STATUS_DISCONNECTED; + } + + return MGMT_STATUS_FAILED; +} + +static u8 mgmt_status(int err) +{ + if (err < 0) + return mgmt_errno_status(err); + + if (err < ARRAY_SIZE(mgmt_status_table)) + return mgmt_status_table[err]; return MGMT_STATUS_FAILED; } @@ -810,12 +840,7 @@ static u32 get_supported_settings(struct hci_dev *hdev) settings |= MGMT_SETTING_SECURE_CONN; settings |= MGMT_SETTING_PRIVACY; settings |= MGMT_SETTING_STATIC_ADDRESS; - - /* When the experimental feature for LL Privacy support is - * enabled, then advertising is no longer supported. 
- */ - if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - settings |= MGMT_SETTING_ADVERTISING; + settings |= MGMT_SETTING_ADVERTISING; } if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || @@ -903,13 +928,6 @@ static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev) return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev); } -static struct mgmt_pending_cmd *pending_find_data(u16 opcode, - struct hci_dev *hdev, - const void *data) -{ - return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data); -} - u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev) { struct mgmt_pending_cmd *cmd; @@ -951,32 +969,41 @@ bool mgmt_get_connectable(struct hci_dev *hdev) return hci_dev_test_flag(hdev, HCI_CONNECTABLE); } +static int service_cache_sync(struct hci_dev *hdev, void *data) +{ + hci_update_eir_sync(hdev); + hci_update_class_sync(hdev); + + return 0; +} + static void service_cache_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, service_cache.work); - struct hci_request req; if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) return; - hci_req_init(&req, hdev); - - hci_dev_lock(hdev); - - __hci_req_update_eir(&req); - __hci_req_update_class(&req); - - hci_dev_unlock(hdev); + hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL); +} - hci_req_run(&req, NULL); +static int rpa_expired_sync(struct hci_dev *hdev, void *data) +{ + /* The generation of a new RPA and programming it into the + * controller happens in the hci_req_enable_advertising() + * function. + */ + if (ext_adv_capable(hdev)) + return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance); + else + return hci_enable_advertising_sync(hdev); } static void rpa_expired(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, rpa_expired.work); - struct hci_request req; bt_dev_dbg(hdev, ""); @@ -985,16 +1012,7 @@ static void rpa_expired(struct work_struct *work) if (!hci_dev_test_flag(hdev, HCI_ADVERTISING)) return; - /* The generation of a new RPA and programming it into the - * controller happens in the hci_req_enable_advertising() - * function. - */ - hci_req_init(&req, hdev); - if (ext_adv_capable(hdev)) - __hci_req_start_ext_adv(&req, hdev->cur_adv_instance); - else - __hci_req_enable_advertising(&req); - hci_req_run(&req, NULL); + hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL); } static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) @@ -1131,16 +1149,6 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) sizeof(settings)); } -static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode) -{ - bt_dev_dbg(hdev, "status 0x%02x", status); - - if (hci_conn_count(hdev) == 0) { - cancel_delayed_work(&hdev->power_off); - queue_work(hdev->req_workqueue, &hdev->power_off.work); - } -} - void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance) { struct mgmt_ev_advertising_added ev; @@ -1168,38 +1176,77 @@ static void cancel_adv_timeout(struct hci_dev *hdev) } } -static int clean_up_hci_state(struct hci_dev *hdev) +/* This function requires the caller holds hdev->lock */ +static void restart_le_actions(struct hci_dev *hdev) { - struct hci_request req; - struct hci_conn *conn; - bool discov_stopped; - int err; + struct hci_conn_params *p; - hci_req_init(&req, hdev); + list_for_each_entry(p, &hdev->le_conn_params, list) { + /* Needed for AUTO_OFF case where might not "really" + * have been powered off. 
+ */ + list_del_init(&p->action); - if (test_bit(HCI_ISCAN, &hdev->flags) || - test_bit(HCI_PSCAN, &hdev->flags)) { - u8 scan = 0x00; - hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); + switch (p->auto_connect) { + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + list_add(&p->action, &hdev->pend_le_conns); + break; + case HCI_AUTO_CONN_REPORT: + list_add(&p->action, &hdev->pend_le_reports); + break; + default: + break; + } } +} + +static int new_settings(struct hci_dev *hdev, struct sock *skip) +{ + __le32 ev = cpu_to_le32(get_current_settings(hdev)); - hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false); + return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, + sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip); +} - if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - __hci_req_disable_advertising(&req); +static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; - discov_stopped = hci_req_stop_discovery(&req); + bt_dev_dbg(hdev, "err %d", err); - list_for_each_entry(conn, &hdev->conn_hash.list, list) { - /* 0x15 == Terminated due to Power Off */ - __hci_abort_conn(&req, conn, 0x15); + if (!err) { + if (cp->val) { + hci_dev_lock(hdev); + restart_le_actions(hdev); + hci_update_passive_scan(hdev); + hci_dev_unlock(hdev); + } + + send_settings_rsp(cmd->sk, cmd->opcode, hdev); + + /* Only call new_setting for power on as power off is deferred + * to hdev->power_off work which does call hci_dev_do_close. + */ + if (cp->val) + new_settings(hdev, cmd->sk); + } else { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, + mgmt_status(err)); } - err = hci_req_run(&req, clean_up_hci_complete); - if (!err && discov_stopped) - hci_discovery_set_state(hdev, DISCOVERY_STOPPING); + mgmt_pending_free(cmd); +} - return err; +static int set_powered_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + + BT_DBG("%s", hdev->name); + + return hci_set_powered_sync(hdev, cp->val); } static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, @@ -1228,43 +1275,20 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } - if (cp->val) { - queue_work(hdev->req_workqueue, &hdev->power_on); - err = 0; - } else { - /* Disconnect connections, stop scans, etc */ - err = clean_up_hci_state(hdev); - if (!err) - queue_delayed_work(hdev->req_workqueue, &hdev->power_off, - HCI_POWER_OFF_TIMEOUT); - - /* ENODATA means there were no HCI commands queued */ - if (err == -ENODATA) { - cancel_delayed_work(&hdev->power_off); - queue_work(hdev->req_workqueue, &hdev->power_off.work); - err = 0; - } - } + err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd, + mgmt_set_powered_complete); failed: hci_dev_unlock(hdev); return err; } -static int new_settings(struct hci_dev *hdev, struct sock *skip) -{ - __le32 ev = cpu_to_le32(get_current_settings(hdev)); - - return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, - sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip); -} - int mgmt_new_settings(struct hci_dev *hdev) { return new_settings(hdev, NULL); @@ -1346,23 +1370,20 @@ static u8 mgmt_le_support(struct hci_dev *hdev) return MGMT_STATUS_SUCCESS; } -void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status) +static void 
mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data, + int err) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; - bt_dev_dbg(hdev, "status 0x%02x", status); + bt_dev_dbg(hdev, "err %d", err); hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); - if (!cmd) - goto unlock; - - if (status) { - u8 mgmt_err = mgmt_status(status); + if (err) { + u8 mgmt_err = mgmt_status(err); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); - goto remove_cmd; + goto done; } if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && @@ -1374,13 +1395,18 @@ void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status) send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev); new_settings(hdev, cmd->sk); -remove_cmd: - mgmt_pending_remove(cmd); - -unlock: +done: + mgmt_pending_free(cmd); hci_dev_unlock(hdev); } +static int set_discoverable_sync(struct hci_dev *hdev, void *data) +{ + BT_DBG("%s", hdev->name); + + return hci_update_discoverable_sync(hdev); +} + static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -1479,7 +1505,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1503,39 +1529,34 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, else hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); - queue_work(hdev->req_workqueue, &hdev->discoverable_update); - err = 0; + err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd, + mgmt_set_discoverable_complete); failed: hci_dev_unlock(hdev); return err; } -void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status) +static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data, + int err) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; - bt_dev_dbg(hdev, "status 0x%02x", status); + bt_dev_dbg(hdev, "err %d", err); hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev); - if (!cmd) - goto unlock; - - if (status) { - u8 mgmt_err = mgmt_status(status); + if (err) { + u8 mgmt_err = mgmt_status(err); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); - goto remove_cmd; + goto done; } send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); new_settings(hdev, cmd->sk); -remove_cmd: - mgmt_pending_remove(cmd); - -unlock: +done: + mgmt_pending_free(cmd); hci_dev_unlock(hdev); } @@ -1561,13 +1582,20 @@ static int set_connectable_update_settings(struct hci_dev *hdev, if (changed) { hci_req_update_scan(hdev); - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); return new_settings(hdev, sk); } return 0; } +static int set_connectable_sync(struct hci_dev *hdev, void *data) +{ + BT_DBG("%s", hdev->name); + + return hci_update_connectable_sync(hdev); +} + static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -1600,7 +1628,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1617,8 +1645,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_clear_flag(hdev, 
HCI_CONNECTABLE); } - queue_work(hdev->req_workqueue, &hdev->connectable_update); - err = 0; + err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd, + mgmt_set_connectable_complete); failed: hci_dev_unlock(hdev); @@ -1653,12 +1681,7 @@ static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data, /* In limited privacy mode the change of bondable mode * may affect the local advertising address. */ - if (hdev_is_powered(hdev) && - hci_dev_test_flag(hdev, HCI_ADVERTISING) && - hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && - hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) - queue_work(hdev->req_workqueue, - &hdev->discoverable_update); + hci_update_discoverable(hdev); err = new_settings(hdev, sk); } @@ -1737,6 +1760,69 @@ failed: return err; } +static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) +{ + struct cmd_lookup match = { NULL, hdev }; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 enable = cp->val; + bool changed; + + if (err) { + u8 mgmt_err = mgmt_status(err); + + if (enable && hci_dev_test_and_clear_flag(hdev, + HCI_SSP_ENABLED)) { + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); + new_settings(hdev, NULL); + } + + mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, + &mgmt_err); + return; + } + + if (enable) { + changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); + } else { + changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); + + if (!changed) + changed = hci_dev_test_and_clear_flag(hdev, + HCI_HS_ENABLED); + else + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); + } + + mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); + + if (changed) + new_settings(hdev, match.sk); + + if (match.sk) + sock_put(match.sk); + + hci_update_eir_sync(hdev); +} + +static int set_ssp_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + bool changed = false; + int err; + + if (cp->val) + changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); + + err = hci_write_ssp_mode_sync(hdev, cp->val); + + if (!err && changed) + hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); + + return err; +} + static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; @@ -1798,19 +1884,18 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) } cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto failed; - } - - if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) - hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, - sizeof(cp->val), &cp->val); + else + err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd, + set_ssp_complete); - err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val); if (err < 0) { - mgmt_pending_remove(cmd); - goto failed; + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_remove(cmd); } failed: @@ -1879,18 +1964,17 @@ unlock: return err; } -static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static void set_le_complete(struct hci_dev *hdev, void *data, int err) { struct cmd_lookup match = { NULL, hdev }; + u8 status = mgmt_status(err); - hci_dev_lock(hdev); + bt_dev_dbg(hdev, "err %d", err); if (status) { - u8 mgmt_err = mgmt_status(status); - mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, - &mgmt_err); - goto unlock; + &status); + return; } mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match); @@ 
-1899,39 +1983,54 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) if (match.sk) sock_put(match.sk); +} + +static int set_le_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 val = !!cp->val; + int err; + + if (!val) { + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) + hci_disable_advertising_sync(hdev); + + if (ext_adv_capable(hdev)) + hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk); + } else { + hci_dev_set_flag(hdev, HCI_LE_ENABLED); + } + + err = hci_write_le_host_supported_sync(hdev, val, 0); /* Make sure the controller has a good default for * advertising data. Restrict the update to when LE * has actually been enabled. During power on, the * update in powered_update_hci will take care of it. */ - if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { - struct hci_request req; - hci_req_init(&req, hdev); + if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { if (ext_adv_capable(hdev)) { - int err; + int status; - err = __hci_req_setup_ext_adv_instance(&req, 0x00); - if (!err) - __hci_req_update_scan_rsp_data(&req, 0x00); + status = hci_setup_ext_adv_instance_sync(hdev, 0x00); + if (!status) + hci_update_scan_rsp_data_sync(hdev, 0x00); } else { - __hci_req_update_adv_data(&req, 0x00); - __hci_req_update_scan_rsp_data(&req, 0x00); + hci_update_adv_data_sync(hdev, 0x00); + hci_update_scan_rsp_data_sync(hdev, 0x00); } - hci_req_run(&req, NULL); - hci_update_background_scan(hdev); + + hci_update_passive_scan(hdev); } -unlock: - hci_dev_unlock(hdev); + return err; } static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; - struct hci_cp_write_le_host_supported hci_cp; struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; u8 val, enabled; @@ -2001,33 +2100,20 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto unlock; - } - - hci_req_init(&req, hdev); - - memset(&hci_cp, 0, sizeof(hci_cp)); + else + err = hci_cmd_sync_queue(hdev, set_le_sync, cmd, + set_le_complete); - if (val) { - hci_cp.le = val; - hci_cp.simul = 0x00; - } else { - if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - __hci_req_disable_advertising(&req); + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_FAILED); - if (ext_adv_capable(hdev)) - __hci_req_clear_ext_adv_sets(&req); + if (cmd) + mgmt_pending_remove(cmd); } - hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), - &hci_cp); - - err = hci_req_run(&req, le_enable_complete); - if (err < 0) - mgmt_pending_remove(cmd); - unlock: hci_dev_unlock(hdev); return err; @@ -2075,37 +2161,33 @@ static u8 get_uuid_size(const u8 *uuid) return 16; } -static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status) +static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; - hci_dev_lock(hdev); - - cmd = pending_find(mgmt_op, hdev); - if (!cmd) - goto unlock; + bt_dev_dbg(hdev, "err %d", err); mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status), hdev->dev_class, 3); - - mgmt_pending_remove(cmd); + mgmt_status(err), hdev->dev_class, 3); -unlock: - hci_dev_unlock(hdev); + mgmt_pending_free(cmd); } -static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static int add_uuid_sync(struct hci_dev *hdev, void *data) 
{ - bt_dev_dbg(hdev, "status 0x%02x", status); + int err; + + err = hci_update_class_sync(hdev); + if (err) + return err; - mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status); + return hci_update_eir_sync(hdev); } static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_add_uuid *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; struct bt_uuid *uuid; int err; @@ -2131,28 +2213,17 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) list_add_tail(&uuid->list, &hdev->uuids); - hci_req_init(&req, hdev); - - __hci_req_update_class(&req); - __hci_req_update_eir(&req); - - err = hci_req_run(&req, add_uuid_complete); - if (err < 0) { - if (err != -ENODATA) - goto failed; - - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0, - hdev->dev_class, 3); - goto failed; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } - err = 0; + err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete); + if (err < 0) { + mgmt_pending_free(cmd); + goto failed; + } failed: hci_dev_unlock(hdev); @@ -2173,11 +2244,15 @@ static bool enable_service_cache(struct hci_dev *hdev) return false; } -static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static int remove_uuid_sync(struct hci_dev *hdev, void *data) { - bt_dev_dbg(hdev, "status 0x%02x", status); + int err; + + err = hci_update_class_sync(hdev); + if (err) + return err; - mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status); + return hci_update_eir_sync(hdev); } static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, @@ -2187,7 +2262,6 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, struct mgmt_pending_cmd *cmd; struct bt_uuid *match, *tmp; u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - struct hci_request req; int err, found; bt_dev_dbg(hdev, "sock %p", sk); @@ -2231,39 +2305,35 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, } update_class: - hci_req_init(&req, hdev); - - __hci_req_update_class(&req); - __hci_req_update_eir(&req); - - err = hci_req_run(&req, remove_uuid_complete); - if (err < 0) { - if (err != -ENODATA) - goto unlock; - - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, - hdev->dev_class, 3); - goto unlock; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } - err = 0; + err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd, + mgmt_class_complete); + if (err < 0) + mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); return err; } -static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static int set_class_sync(struct hci_dev *hdev, void *data) { - bt_dev_dbg(hdev, "status 0x%02x", status); + int err = 0; + + if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) { + cancel_delayed_work_sync(&hdev->service_cache); + err = hci_update_eir_sync(hdev); + } + + if (err) + return err; - mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status); + return hci_update_class_sync(hdev); } static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, @@ -2271,7 +2341,6 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, { struct mgmt_cp_set_dev_class *cp = data; struct mgmt_pending_cmd *cmd; - 
struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -2303,34 +2372,16 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - hci_req_init(&req, hdev); - - if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) { - hci_dev_unlock(hdev); - cancel_delayed_work_sync(&hdev->service_cache); - hci_dev_lock(hdev); - __hci_req_update_eir(&req); - } - - __hci_req_update_class(&req); - - err = hci_req_run(&req, set_class_complete); - if (err < 0) { - if (err != -ENODATA) - goto unlock; - - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, - hdev->dev_class, 3); - goto unlock; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } - err = 0; + err = hci_cmd_sync_queue(hdev, set_class_sync, cmd, + mgmt_class_complete); + if (err < 0) + mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); @@ -3228,65 +3279,70 @@ static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev, HCI_OP_USER_PASSKEY_NEG_REPLY, 0); } -static void adv_expire(struct hci_dev *hdev, u32 flags) +static int adv_expire_sync(struct hci_dev *hdev, u32 flags) { struct adv_info *adv_instance; - struct hci_request req; - int err; adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); if (!adv_instance) - return; + return 0; /* stop if current instance doesn't need to be changed */ if (!(adv_instance->flags & flags)) - return; + return 0; cancel_adv_timeout(hdev); adv_instance = hci_get_next_instance(hdev, adv_instance->instance); if (!adv_instance) - return; + return 0; - hci_req_init(&req, hdev); - err = __hci_req_schedule_adv_instance(&req, adv_instance->instance, - true); - if (err) - return; + hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true); - hci_req_run(&req, NULL); + return 0; } -static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static int name_changed_sync(struct hci_dev *hdev, void *data) { - struct mgmt_cp_set_local_name *cp; - struct mgmt_pending_cmd *cmd; - - bt_dev_dbg(hdev, "status 0x%02x", status); - - hci_dev_lock(hdev); + return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME); +} - cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); - if (!cmd) - goto unlock; +static void set_name_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_set_local_name *cp = cmd->param; + u8 status = mgmt_status(err); - cp = cmd->param; + bt_dev_dbg(hdev, "err %d", err); if (status) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, - mgmt_status(status)); + status); } else { mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, cp, sizeof(*cp)); if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME); + hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL); } mgmt_pending_remove(cmd); +} -unlock: - hci_dev_unlock(hdev); +static int set_name_sync(struct hci_dev *hdev, void *data) +{ + if (lmp_bredr_capable(hdev)) { + hci_update_name_sync(hdev); + hci_update_eir_sync(hdev); + } + + /* The name is stored in the scan response data and so + * no need to update the advertising data here. 
+ */ + if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING)) + hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance); + + return 0; } static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, @@ -3294,7 +3350,6 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, { struct mgmt_cp_set_local_name *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -3330,35 +3385,34 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto failed; - } + else + err = hci_cmd_sync_queue(hdev, set_name_sync, cmd, + set_name_complete); - memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, + MGMT_STATUS_FAILED); - hci_req_init(&req, hdev); + if (cmd) + mgmt_pending_remove(cmd); - if (lmp_bredr_capable(hdev)) { - __hci_req_update_name(&req); - __hci_req_update_eir(&req); + goto failed; } - /* The name is stored in the scan response data and so - * no need to update the advertising data here. - */ - if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING)) - __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance); - - err = hci_req_run(&req, set_name_complete); - if (err < 0) - mgmt_pending_remove(cmd); + memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); failed: hci_dev_unlock(hdev); return err; } +static int appearance_changed_sync(struct hci_dev *hdev, void *data) +{ + return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE); +} + static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -3380,7 +3434,8 @@ static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data, hdev->appearance = appearance; if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE); + hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL, + NULL); ext_info_changed(hdev, sk); } @@ -3426,23 +3481,26 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip) sizeof(ev), skip); } -static void set_default_phy_complete(struct hci_dev *hdev, u8 status, - u16 opcode, struct sk_buff *skb) +static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err) { - struct mgmt_pending_cmd *cmd; - - bt_dev_dbg(hdev, "status 0x%02x", status); + struct mgmt_pending_cmd *cmd = data; + struct sk_buff *skb = cmd->skb; + u8 status = mgmt_status(err); - hci_dev_lock(hdev); + if (!status) { + if (!skb) + status = MGMT_STATUS_FAILED; + else if (IS_ERR(skb)) + status = mgmt_status(PTR_ERR(skb)); + else + status = mgmt_status(skb->data[0]); + } - cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev); - if (!cmd) - goto unlock; + bt_dev_dbg(hdev, "status %d", status); if (status) { mgmt_cmd_status(cmd->sk, hdev->id, - MGMT_OP_SET_PHY_CONFIGURATION, - mgmt_status(status)); + MGMT_OP_SET_PHY_CONFIGURATION, status); } else { mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, 0, @@ -3451,19 +3509,56 @@ static void set_default_phy_complete(struct hci_dev *hdev, u8 status, mgmt_phy_configuration_changed(hdev, cmd->sk); } + if (skb && !IS_ERR(skb)) + kfree_skb(skb); + mgmt_pending_remove(cmd); +} -unlock: - hci_dev_unlock(hdev); +static int set_default_phy_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_set_phy_configuration *cp 
= cmd->param; + struct hci_cp_le_set_default_phy cp_phy; + u32 selected_phys = __le32_to_cpu(cp->selected_phys); + + memset(&cp_phy, 0, sizeof(cp_phy)); + + if (!(selected_phys & MGMT_PHY_LE_TX_MASK)) + cp_phy.all_phys |= 0x01; + + if (!(selected_phys & MGMT_PHY_LE_RX_MASK)) + cp_phy.all_phys |= 0x02; + + if (selected_phys & MGMT_PHY_LE_1M_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_1M; + + if (selected_phys & MGMT_PHY_LE_2M_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_2M; + + if (selected_phys & MGMT_PHY_LE_CODED_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED; + + if (selected_phys & MGMT_PHY_LE_1M_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_1M; + + if (selected_phys & MGMT_PHY_LE_2M_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_2M; + + if (selected_phys & MGMT_PHY_LE_CODED_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED; + + cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY, + sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT); + + return 0; } static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_phy_configuration *cp = data; - struct hci_cp_le_set_default_phy cp_phy; struct mgmt_pending_cmd *cmd; - struct hci_request req; u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys; u16 pkt_type = (HCI_DH1 | HCI_DM1); bool changed = false; @@ -3567,44 +3662,20 @@ static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto unlock; - } - - hci_req_init(&req, hdev); - - memset(&cp_phy, 0, sizeof(cp_phy)); - - if (!(selected_phys & MGMT_PHY_LE_TX_MASK)) - cp_phy.all_phys |= 0x01; - - if (!(selected_phys & MGMT_PHY_LE_RX_MASK)) - cp_phy.all_phys |= 0x02; - - if (selected_phys & MGMT_PHY_LE_1M_TX) - cp_phy.tx_phys |= HCI_LE_SET_PHY_1M; - - if (selected_phys & MGMT_PHY_LE_2M_TX) - cp_phy.tx_phys |= HCI_LE_SET_PHY_2M; - - if (selected_phys & MGMT_PHY_LE_CODED_TX) - cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED; - - if (selected_phys & MGMT_PHY_LE_1M_RX) - cp_phy.rx_phys |= HCI_LE_SET_PHY_1M; - - if (selected_phys & MGMT_PHY_LE_2M_RX) - cp_phy.rx_phys |= HCI_LE_SET_PHY_2M; - - if (selected_phys & MGMT_PHY_LE_CODED_RX) - cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED; + else + err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd, + set_default_phy_complete); - hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy); + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_FAILED); - err = hci_req_run_skb(&req, set_default_phy_complete); - if (err < 0) - mgmt_pending_remove(cmd); + if (cmd) + mgmt_pending_remove(cmd); + } unlock: hci_dev_unlock(hdev); @@ -3852,7 +3923,7 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, idx++; } - if (hdev && use_ll_privacy(hdev)) { + if (hdev && ll_privacy_capable(hdev)) { if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) flags = BIT(0) | BIT(1); else @@ -3863,7 +3934,8 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, idx++; } - if (hdev && hdev->set_quality_report) { + if (hdev && (aosp_has_quality_report(hdev) || + hdev->set_quality_report)) { if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)) flags = BIT(0); else @@ -3927,7 +3999,9 @@ static int exp_debug_feature_changed(bool enabled, struct sock *skip) } #endif -static int exp_quality_report_feature_changed(bool enabled, struct sock *skip) +static int exp_quality_report_feature_changed(bool enabled, + struct hci_dev *hdev, + struct sock 
*skip) { struct mgmt_ev_exp_feature_changed ev; @@ -3935,7 +4009,7 @@ static int exp_quality_report_feature_changed(bool enabled, struct sock *skip) memcpy(ev.uuid, quality_report_uuid, 16); ev.flags = cpu_to_le32(enabled ? BIT(0) : 0); - return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev, sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS, skip); } @@ -4125,7 +4199,7 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, val = !!cp->param[0]; changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)); - if (!hdev->set_quality_report) { + if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_NOT_SUPPORTED); @@ -4133,13 +4207,18 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, } if (changed) { - err = hdev->set_quality_report(hdev, val); + if (hdev->set_quality_report) + err = hdev->set_quality_report(hdev, val); + else + err = aosp_set_quality_report(hdev, val); + if (err) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_FAILED); goto unlock_quality_report; } + if (val) hci_dev_set_flag(hdev, HCI_QUALITY_REPORT); else @@ -4151,19 +4230,20 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, memcpy(rp.uuid, quality_report_uuid, 16); rp.flags = cpu_to_le32(val ? BIT(0) : 0); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); - err = mgmt_cmd_complete(sk, hdev->id, - MGMT_OP_SET_EXP_FEATURE, 0, + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) - exp_quality_report_feature_changed(val, sk); + exp_quality_report_feature_changed(val, hdev, sk); unlock_quality_report: hci_req_sync_unlock(hdev); return err; } -static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip) +static int exp_offload_codec_feature_changed(bool enabled, struct hci_dev *hdev, + struct sock *skip) { struct mgmt_ev_exp_feature_changed ev; @@ -4171,7 +4251,7 @@ static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip) memcpy(ev.uuid, offload_codecs_uuid, 16); ev.flags = cpu_to_le32(enabled ? 
BIT(0) : 0); - return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev, sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS, skip); } @@ -4229,7 +4309,7 @@ static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev, &rp, sizeof(rp)); if (changed) - exp_offload_codec_feature_changed(val, sk); + exp_offload_codec_feature_changed(val, hdev, sk); return err; } @@ -4496,7 +4576,7 @@ int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) hdev->adv_monitors_cnt++; if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED) monitor->state = ADV_MONITOR_STATE_REGISTERED; - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); } err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, @@ -4722,7 +4802,7 @@ int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status) rp.monitor_handle = cp->monitor_handle; if (!status) - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); @@ -4801,28 +4881,33 @@ unlock: status); } -static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, - u16 opcode, struct sk_buff *skb) +static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_rp_read_local_oob_data mgmt_rp; size_t rp_size = sizeof(mgmt_rp); - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; + struct sk_buff *skb = cmd->skb; + u8 status = mgmt_status(err); - bt_dev_dbg(hdev, "status %u", status); + if (!status) { + if (!skb) + status = MGMT_STATUS_FAILED; + else if (IS_ERR(skb)) + status = mgmt_status(PTR_ERR(skb)); + else + status = mgmt_status(skb->data[0]); + } - cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); - if (!cmd) - return; + bt_dev_dbg(hdev, "status %d", status); - if (status || !skb) { - mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, - status ? 
mgmt_status(status) : MGMT_STATUS_FAILED); + if (status) { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status); goto remove; } memset(&mgmt_rp, 0, sizeof(mgmt_rp)); - if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) { + if (!bredr_sc_enabled(hdev)) { struct hci_rp_read_local_oob_data *rp = (void *) skb->data; if (skb->len < sizeof(*rp)) { @@ -4857,14 +4942,31 @@ static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size); remove: - mgmt_pending_remove(cmd); + if (skb && !IS_ERR(skb)) + kfree_skb(skb); + + mgmt_pending_free(cmd); +} + +static int read_local_oob_data_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + + if (bredr_sc_enabled(hdev)) + cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk); + else + cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk); + + if (IS_ERR(cmd->skb)) + return PTR_ERR(cmd->skb); + else + return 0; } static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -4889,22 +4991,20 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); - if (!cmd) { + cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); + if (!cmd) err = -ENOMEM; - goto unlock; - } - - hci_req_init(&req, hdev); - - if (bredr_sc_enabled(hdev)) - hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL); else - hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); + err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd, + read_local_oob_data_complete); - err = hci_req_run_skb(&req, read_local_oob_data_complete); - if (err < 0) - mgmt_pending_remove(cmd); + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_free(cmd); + } unlock: hci_dev_unlock(hdev); @@ -5077,13 +5177,6 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status) } hci_dev_unlock(hdev); - - /* Handle suspend notifier */ - if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY, - hdev->suspend_tasks)) { - bt_dev_dbg(hdev, "Unpaused discovery"); - wake_up(&hdev->suspend_wait_q); - } } static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, @@ -5113,6 +5206,25 @@ static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, return true; } +static void start_discovery_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + bt_dev_dbg(hdev, "err %d", err); + + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + cmd->param, 1); + mgmt_pending_free(cmd); + + hci_discovery_set_state(hdev, err ? 
DISCOVERY_STOPPED: + DISCOVERY_FINDING); +} + +static int start_discovery_sync(struct hci_dev *hdev, void *data) +{ + return hci_start_discovery_sync(hdev); +} + static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev, u16 op, void *data, u16 len) { @@ -5164,17 +5276,20 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev, else hdev->discovery.limited = false; - cmd = mgmt_pending_add(sk, op, hdev, data, len); + cmd = mgmt_pending_new(sk, op, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } - cmd->cmd_complete = generic_cmd_complete; + err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd, + start_discovery_complete); + if (err < 0) { + mgmt_pending_free(cmd); + goto failed; + } hci_discovery_set_state(hdev, DISCOVERY_STARTING); - queue_work(hdev->req_workqueue, &hdev->discov_update); - err = 0; failed: hci_dev_unlock(hdev); @@ -5196,13 +5311,6 @@ static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev, data, len); } -static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd, - u8 status) -{ - return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, - cmd->param, 1); -} - static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -5271,15 +5379,13 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY, + cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } - cmd->cmd_complete = service_discovery_cmd_complete; - /* Clear the discovery filter first to free any previously * allocated memory for the UUID list. */ @@ -5303,9 +5409,14 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, } } + err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd, + start_discovery_complete); + if (err < 0) { + mgmt_pending_free(cmd); + goto failed; + } + hci_discovery_set_state(hdev, DISCOVERY_STARTING); - queue_work(hdev->req_workqueue, &hdev->discov_update); - err = 0; failed: hci_dev_unlock(hdev); @@ -5327,12 +5438,25 @@ void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status) } hci_dev_unlock(hdev); +} - /* Handle suspend notifier */ - if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) { - bt_dev_dbg(hdev, "Paused discovery"); - wake_up(&hdev->suspend_wait_q); - } +static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + bt_dev_dbg(hdev, "err %d", err); + + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + cmd->param, 1); + mgmt_pending_free(cmd); + + if (!err) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); +} + +static int stop_discovery_sync(struct hci_dev *hdev, void *data) +{ + return hci_stop_discovery_sync(hdev); } static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, @@ -5360,17 +5484,20 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } - cmd->cmd_complete = generic_cmd_complete; + err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd, + stop_discovery_complete); + if (err < 0) { + mgmt_pending_free(cmd); + goto unlock; + } hci_discovery_set_state(hdev, DISCOVERY_STOPPING); - queue_work(hdev->req_workqueue, 
&hdev->discov_update); - err = 0; unlock: hci_dev_unlock(hdev); @@ -5491,11 +5618,15 @@ done: return err; } +static int set_device_id_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_eir_sync(hdev); +} + static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_device_id *cp = data; - struct hci_request req; int err; __u16 source; @@ -5517,38 +5648,32 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0); - hci_req_init(&req, hdev); - __hci_req_update_eir(&req); - hci_req_run(&req, NULL); + hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL); hci_dev_unlock(hdev); return err; } -static void enable_advertising_instance(struct hci_dev *hdev, u8 status, - u16 opcode) +static void enable_advertising_instance(struct hci_dev *hdev, int err) { - bt_dev_dbg(hdev, "status %u", status); + if (err) + bt_dev_err(hdev, "failed to re-configure advertising %d", err); + else + bt_dev_dbg(hdev, "status %d", err); } -static void set_advertising_complete(struct hci_dev *hdev, u8 status, - u16 opcode) +static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) { struct cmd_lookup match = { NULL, hdev }; - struct hci_request req; u8 instance; struct adv_info *adv_instance; - int err; - - hci_dev_lock(hdev); + u8 status = mgmt_status(err); if (status) { - u8 mgmt_err = mgmt_status(status); - mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, - cmd_status_rsp, &mgmt_err); - goto unlock; + cmd_status_rsp, &status); + return; } if (hci_dev_test_flag(hdev, HCI_LE_ADV)) @@ -5564,46 +5689,60 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status, if (match.sk) sock_put(match.sk); - /* Handle suspend notifier */ - if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING, - hdev->suspend_tasks)) { - bt_dev_dbg(hdev, "Paused advertising"); - wake_up(&hdev->suspend_wait_q); - } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING, - hdev->suspend_tasks)) { - bt_dev_dbg(hdev, "Unpaused advertising"); - wake_up(&hdev->suspend_wait_q); - } - /* If "Set Advertising" was just disabled and instance advertising was * set up earlier, then re-enable multi-instance advertising. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) - goto unlock; + return; instance = hdev->cur_adv_instance; if (!instance) { adv_instance = list_first_entry_or_null(&hdev->adv_instances, struct adv_info, list); if (!adv_instance) - goto unlock; + return; instance = adv_instance->instance; } - hci_req_init(&req, hdev); + err = hci_schedule_adv_instance_sync(hdev, instance, true); - err = __hci_req_schedule_adv_instance(&req, instance, true); + enable_advertising_instance(hdev, err); +} - if (!err) - err = hci_req_run(&req, enable_advertising_instance); +static int set_adv_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 val = !!cp->val; - if (err) - bt_dev_err(hdev, "failed to re-configure advertising"); + if (cp->val == 0x02) + hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + else + hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); -unlock: - hci_dev_unlock(hdev); + cancel_adv_timeout(hdev); + + if (val) { + /* Switch to instance "0" for the Set Advertising setting. + * We cannot use update_[adv|scan_rsp]_data() here as the + * HCI_ADVERTISING flag is not yet set. 
+ */ + hdev->cur_adv_instance = 0x00; + + if (ext_adv_capable(hdev)) { + hci_start_ext_adv_sync(hdev, 0x00); + } else { + hci_update_adv_data_sync(hdev, 0x00); + hci_update_scan_rsp_data_sync(hdev, 0x00); + hci_enable_advertising_sync(hdev); + } + } else { + hci_disable_advertising_sync(hdev); + } + + return 0; } static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, @@ -5611,7 +5750,6 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; u8 val, status; int err; @@ -5622,13 +5760,6 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, status); - /* Enabling the experimental LL Privay support disables support for - * advertising. - */ - if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, - MGMT_STATUS_NOT_SUPPORTED); - if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); @@ -5684,40 +5815,13 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, } cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len); - if (!cmd) { + if (!cmd) err = -ENOMEM; - goto unlock; - } - - hci_req_init(&req, hdev); - - if (cp->val == 0x02) - hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); else - hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); - - cancel_adv_timeout(hdev); - - if (val) { - /* Switch to instance "0" for the Set Advertising setting. - * We cannot use update_[adv|scan_rsp]_data() here as the - * HCI_ADVERTISING flag is not yet set. - */ - hdev->cur_adv_instance = 0x00; + err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd, + set_advertising_complete); - if (ext_adv_capable(hdev)) { - __hci_req_start_ext_adv(&req, 0x00); - } else { - __hci_req_update_adv_data(&req, 0x00); - __hci_req_update_scan_rsp_data(&req, 0x00); - __hci_req_enable_advertising(&req); - } - } else { - __hci_req_disable_advertising(&req); - } - - err = hci_req_run(&req, set_advertising_complete); - if (err < 0) + if (err < 0 && cmd) mgmt_pending_remove(cmd); unlock: @@ -5810,38 +5914,23 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev, * loaded. 
*/ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && - hdev->discovery.state == DISCOVERY_STOPPED) { - struct hci_request req; - - hci_req_init(&req, hdev); - - hci_req_add_le_scan_disable(&req, false); - hci_req_add_le_passive_scan(&req); - - hci_req_run(&req, NULL); - } + hdev->discovery.state == DISCOVERY_STOPPED) + hci_update_passive_scan(hdev); hci_dev_unlock(hdev); return err; } -static void fast_connectable_complete(struct hci_dev *hdev, u8 status, - u16 opcode) +static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err) { - struct mgmt_pending_cmd *cmd; - - bt_dev_dbg(hdev, "status 0x%02x", status); + struct mgmt_pending_cmd *cmd = data; - hci_dev_lock(hdev); - - cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev); - if (!cmd) - goto unlock; + bt_dev_dbg(hdev, "err %d", err); - if (status) { + if (err) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - mgmt_status(status)); + mgmt_status(err)); } else { struct mgmt_mode *cp = cmd->param; @@ -5854,10 +5943,15 @@ static void fast_connectable_complete(struct hci_dev *hdev, u8 status, new_settings(hdev, cmd->sk); } - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); +} -unlock: - hci_dev_unlock(hdev); +static int write_fast_connectable_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + + return hci_write_fast_connectable_sync(hdev, cp->val); } static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, @@ -5865,58 +5959,49 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) || hdev->hci_ver < BLUETOOTH_VER_1_2) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); - if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_BUSY); - goto unlock; - } - if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) { - err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, - hdev); + err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); goto unlock; } if (!hdev_is_powered(hdev)) { hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE); - err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, - hdev); + err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); new_settings(hdev, sk); goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, - data, len); - if (!cmd) { + cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data, + len); + if (!cmd) err = -ENOMEM; - goto unlock; - } - - hci_req_init(&req, hdev); - - __hci_req_write_fast_connectable(&req, cp->val); + else + err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd, + fast_connectable_complete); - err = hci_req_run(&req, fast_connectable_complete); if (err < 0) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_FAILED); - mgmt_pending_remove(cmd); + mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + MGMT_STATUS_FAILED); + + if (cmd) + 
mgmt_pending_free(cmd); } unlock: @@ -5925,20 +6010,14 @@ unlock: return err; } -static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static void set_bredr_complete(struct hci_dev *hdev, void *data, int err) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; - bt_dev_dbg(hdev, "status 0x%02x", status); - - hci_dev_lock(hdev); - - cmd = pending_find(MGMT_OP_SET_BREDR, hdev); - if (!cmd) - goto unlock; + bt_dev_dbg(hdev, "err %d", err); - if (status) { - u8 mgmt_err = mgmt_status(status); + if (err) { + u8 mgmt_err = mgmt_status(err); /* We need to restore the flag if related HCI commands * failed. @@ -5951,17 +6030,31 @@ static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode) new_settings(hdev, cmd->sk); } - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); +} -unlock: - hci_dev_unlock(hdev); +static int set_bredr_sync(struct hci_dev *hdev, void *data) +{ + int status; + + status = hci_write_fast_connectable_sync(hdev, false); + + if (!status) + status = hci_update_scan_sync(hdev); + + /* Since only the advertising data flags will change, there + * is no need to update the scan response data. + */ + if (!status) + status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); + + return status; } static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -6033,15 +6126,19 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) } } - if (pending_find(MGMT_OP_SET_BREDR, hdev)) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, - MGMT_STATUS_BUSY); - goto unlock; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len); - if (!cmd) { + cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len); + if (!cmd) err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd, + set_bredr_complete); + + if (err < 0) { + mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_FAILED); + if (cmd) + mgmt_pending_free(cmd); + goto unlock; } @@ -6050,42 +6147,23 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) */ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); - hci_req_init(&req, hdev); - - __hci_req_write_fast_connectable(&req, false); - __hci_req_update_scan(&req); - - /* Since only the advertising data flags will change, there - * is no need to update the scan response data. 
- */ - __hci_req_update_adv_data(&req, hdev->cur_adv_instance); - - err = hci_req_run(&req, set_bredr_complete); - if (err < 0) - mgmt_pending_remove(cmd); - unlock: hci_dev_unlock(hdev); return err; } -static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp; - bt_dev_dbg(hdev, "status %u", status); - - hci_dev_lock(hdev); + bt_dev_dbg(hdev, "err %d", err); - cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev); - if (!cmd) - goto unlock; + if (err) { + u8 mgmt_err = mgmt_status(err); - if (status) { - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status)); - goto remove; + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + goto done; } cp = cmd->param; @@ -6105,13 +6183,23 @@ static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) break; } - send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev); + send_settings_rsp(cmd->sk, cmd->opcode, hdev); new_settings(hdev, cmd->sk); -remove: - mgmt_pending_remove(cmd); -unlock: - hci_dev_unlock(hdev); +done: + mgmt_pending_free(cmd); +} + +static int set_secure_conn_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 val = !!cp->val; + + /* Force write of val */ + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + + return hci_write_sc_support_sync(hdev, val); } static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, @@ -6119,7 +6207,6 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; - struct hci_request req; u8 val; int err; @@ -6138,7 +6225,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, - MGMT_STATUS_INVALID_PARAMS); + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); @@ -6169,12 +6256,6 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, goto failed; } - if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, - MGMT_STATUS_BUSY); - goto failed; - } - val = !!cp->val; if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) && @@ -6183,18 +6264,18 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len); - if (!cmd) { + cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len); + if (!cmd) err = -ENOMEM; - goto failed; - } + else + err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd, + set_secure_conn_complete); - hci_req_init(&req, hdev); - hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val); - err = hci_req_run(&req, sc_enable_complete); if (err < 0) { - mgmt_pending_remove(cmd); - goto failed; + mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, + MGMT_STATUS_FAILED); + if (cmd) + mgmt_pending_free(cmd); } failed: @@ -6508,14 +6589,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, return err; } -static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) +static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err) { + struct mgmt_pending_cmd *cmd = data; struct hci_conn *conn = cmd->user_data; + struct mgmt_cp_get_conn_info *cp = cmd->param; struct mgmt_rp_get_conn_info 
rp; - int err; + u8 status; - memcpy(&rp.addr, cmd->param, sizeof(rp.addr)); + bt_dev_dbg(hdev, "err %d", err); + + memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr)); + status = mgmt_status(err); if (status == MGMT_STATUS_SUCCESS) { rp.rssi = conn->rssi; rp.tx_power = conn->tx_power; @@ -6526,67 +6612,58 @@ static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) rp.max_tx_power = HCI_TX_POWER_INVALID; } - err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, - status, &rp, sizeof(rp)); + mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, + &rp, sizeof(rp)); - hci_conn_drop(conn); - hci_conn_put(conn); + if (conn) { + hci_conn_drop(conn); + hci_conn_put(conn); + } - return err; + mgmt_pending_free(cmd); } -static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status, - u16 opcode) +static int get_conn_info_sync(struct hci_dev *hdev, void *data) { - struct hci_cp_read_rssi *cp; - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_get_conn_info *cp = cmd->param; struct hci_conn *conn; - u16 handle; - u8 status; - - bt_dev_dbg(hdev, "status 0x%02x", hci_status); + int err; + __le16 handle; - hci_dev_lock(hdev); + /* Make sure we are still connected */ + if (cp->addr.type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); - /* Commands sent in request are either Read RSSI or Read Transmit Power - * Level so we check which one was last sent to retrieve connection - * handle. Both commands have handle as first parameter so it's safe to - * cast data on the same command struct. - * - * First command sent is always Read RSSI and we fail only if it fails. - * In other case we simply override error to indicate success as we - * already remembered if TX power value is actually valid. - */ - cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI); - if (!cp) { - cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); - status = MGMT_STATUS_SUCCESS; - } else { - status = mgmt_status(hci_status); + if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) { + if (cmd->user_data) { + hci_conn_drop(cmd->user_data); + hci_conn_put(cmd->user_data); + cmd->user_data = NULL; + } + return MGMT_STATUS_NOT_CONNECTED; } - if (!cp) { - bt_dev_err(hdev, "invalid sent_cmd in conn_info response"); - goto unlock; - } + handle = cpu_to_le16(conn->handle); - handle = __le16_to_cpu(cp->handle); - conn = hci_conn_hash_lookup_handle(hdev, handle); - if (!conn) { - bt_dev_err(hdev, "unknown handle (%u) in conn_info response", - handle); - goto unlock; - } + /* Refresh RSSI each time */ + err = hci_read_rssi_sync(hdev, handle); - cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn); - if (!cmd) - goto unlock; + /* For LE links TX power does not change thus we don't need to + * query for it once value is known. 
+ */ + if (!err && (!bdaddr_type_is_le(cp->addr.type) || + conn->tx_power == HCI_TX_POWER_INVALID)) + err = hci_read_tx_power_sync(hdev, handle, 0x00); - cmd->cmd_complete(cmd, status); - mgmt_pending_remove(cmd); + /* Max TX power needs to be read only once per connection */ + if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID) + err = hci_read_tx_power_sync(hdev, handle, 0x01); -unlock: - hci_dev_unlock(hdev); + return err; } static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, @@ -6631,12 +6708,6 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) { - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, - MGMT_STATUS_BUSY, &rp, sizeof(rp)); - goto unlock; - } - /* To avoid client trying to guess when to poll again for information we * calculate conn info age as random value between min/max set in hdev. */ @@ -6650,49 +6721,28 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, if (time_after(jiffies, conn->conn_info_timestamp + msecs_to_jiffies(conn_info_age)) || !conn->conn_info_timestamp) { - struct hci_request req; - struct hci_cp_read_tx_power req_txp_cp; - struct hci_cp_read_rssi req_rssi_cp; struct mgmt_pending_cmd *cmd; - hci_req_init(&req, hdev); - req_rssi_cp.handle = cpu_to_le16(conn->handle); - hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp), - &req_rssi_cp); - - /* For LE links TX power does not change thus we don't need to - * query for it once value is known. - */ - if (!bdaddr_type_is_le(cp->addr.type) || - conn->tx_power == HCI_TX_POWER_INVALID) { - req_txp_cp.handle = cpu_to_le16(conn->handle); - req_txp_cp.type = 0x00; - hci_req_add(&req, HCI_OP_READ_TX_POWER, - sizeof(req_txp_cp), &req_txp_cp); - } + cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data, + len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, get_conn_info_sync, + cmd, get_conn_info_complete); - /* Max TX power needs to be read only once per connection */ - if (conn->max_tx_power == HCI_TX_POWER_INVALID) { - req_txp_cp.handle = cpu_to_le16(conn->handle); - req_txp_cp.type = 0x01; - hci_req_add(&req, HCI_OP_READ_TX_POWER, - sizeof(req_txp_cp), &req_txp_cp); - } + if (err < 0) { + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_FAILED, &rp, sizeof(rp)); - err = hci_req_run(&req, conn_info_refresh_complete); - if (err < 0) - goto unlock; + if (cmd) + mgmt_pending_free(cmd); - cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev, - data, len); - if (!cmd) { - err = -ENOMEM; goto unlock; } hci_conn_hold(conn); cmd->user_data = hci_conn_get(conn); - cmd->cmd_complete = conn_info_cmd_complete; conn->conn_info_timestamp = jiffies; } else { @@ -6710,82 +6760,76 @@ unlock: return err; } -static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) +static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err) { - struct hci_conn *conn = cmd->user_data; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_get_clock_info *cp = cmd->param; struct mgmt_rp_get_clock_info rp; - struct hci_dev *hdev; - int err; + struct hci_conn *conn = cmd->user_data; + u8 status = mgmt_status(err); + + bt_dev_dbg(hdev, "err %d", err); memset(&rp, 0, sizeof(rp)); - memcpy(&rp.addr, cmd->param, sizeof(rp.addr)); + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; - if (status) + if (err) goto complete; - hdev = hci_dev_get(cmd->index); - if (hdev) { - 
rp.local_clock = cpu_to_le32(hdev->clock); - hci_dev_put(hdev); - } + rp.local_clock = cpu_to_le32(hdev->clock); if (conn) { rp.piconet_clock = cpu_to_le32(conn->clock); rp.accuracy = cpu_to_le16(conn->clock_accuracy); - } - -complete: - err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, - sizeof(rp)); - - if (conn) { hci_conn_drop(conn); hci_conn_put(conn); } - return err; +complete: + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, + sizeof(rp)); + + mgmt_pending_free(cmd); } -static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode) +static int get_clock_info_sync(struct hci_dev *hdev, void *data) { - struct hci_cp_read_clock *hci_cp; - struct mgmt_pending_cmd *cmd; - struct hci_conn *conn; - - bt_dev_dbg(hdev, "status %u", status); + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_get_clock_info *cp = cmd->param; + struct hci_cp_read_clock hci_cp; + struct hci_conn *conn = cmd->user_data; + int err; - hci_dev_lock(hdev); + memset(&hci_cp, 0, sizeof(hci_cp)); + err = hci_read_clock_sync(hdev, &hci_cp); - hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); - if (!hci_cp) - goto unlock; + if (conn) { + /* Make sure connection still exists */ + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); - if (hci_cp->which) { - u16 handle = __le16_to_cpu(hci_cp->handle); - conn = hci_conn_hash_lookup_handle(hdev, handle); - } else { - conn = NULL; + if (conn && conn == cmd->user_data && + conn->state == BT_CONNECTED) { + hci_cp.handle = cpu_to_le16(conn->handle); + hci_cp.which = 0x01; /* Piconet clock */ + err = hci_read_clock_sync(hdev, &hci_cp); + } else if (cmd->user_data) { + hci_conn_drop(cmd->user_data); + hci_conn_put(cmd->user_data); + cmd->user_data = NULL; + } } - cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn); - if (!cmd) - goto unlock; - - cmd->cmd_complete(cmd, mgmt_status(status)); - mgmt_pending_remove(cmd); - -unlock: - hci_dev_unlock(hdev); + return err; } static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, - u16 len) + u16 len) { struct mgmt_cp_get_clock_info *cp = data; struct mgmt_rp_get_clock_info rp; - struct hci_cp_read_clock hci_cp; struct mgmt_pending_cmd *cmd; - struct hci_request req; struct hci_conn *conn; int err; @@ -6823,31 +6867,25 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, conn = NULL; } - cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len); - if (!cmd) { + cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len); + if (!cmd) err = -ENOMEM; - goto unlock; - } - - cmd->cmd_complete = clock_info_cmd_complete; + else + err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd, + get_clock_info_complete); - hci_req_init(&req, hdev); + if (err < 0) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, + MGMT_STATUS_FAILED, &rp, sizeof(rp)); - memset(&hci_cp, 0, sizeof(hci_cp)); - hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp); + if (cmd) + mgmt_pending_free(cmd); - if (conn) { + } else if (conn) { hci_conn_hold(conn); cmd->user_data = hci_conn_get(conn); - - hci_cp.handle = cpu_to_le16(conn->handle); - hci_cp.which = 0x01; /* Piconet clock */ - hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp); } - err = hci_req_run(&req, get_clock_info_complete); - if (err < 0) - mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); @@ -6928,6 +6966,11 @@ static void device_added(struct sock *sk, struct hci_dev *hdev, mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, 
sizeof(ev), sk); } +static int add_device_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_passive_scan_sync(hdev); +} + static int add_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -7010,7 +7053,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, current_flags = params->current_flags; } - hci_update_background_scan(hdev); + err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL); + if (err < 0) + goto unlock; added: device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); @@ -7037,6 +7082,11 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev, mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk); } +static int remove_device_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_passive_scan_sync(hdev); +} + static int remove_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -7116,7 +7166,6 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, list_del(¶ms->action); list_del(¶ms->list); kfree(params); - hci_update_background_scan(hdev); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); } else { @@ -7153,10 +7202,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, } bt_dev_dbg(hdev, "All LE connection parameters were removed"); - - hci_update_background_scan(hdev); } + hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL); + complete: err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_SUCCESS, &cp->addr, @@ -7359,21 +7408,27 @@ unlock: return err; } -static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status, - u16 opcode, struct sk_buff *skb) +static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data, + int err) { const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp; struct mgmt_rp_read_local_oob_ext_data *mgmt_rp; u8 *h192, *r192, *h256, *r256; - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; + struct sk_buff *skb = cmd->skb; + u8 status = mgmt_status(err); u16 eir_len; - int err; - bt_dev_dbg(hdev, "status %u", status); + if (!status) { + if (!skb) + status = MGMT_STATUS_FAILED; + else if (IS_ERR(skb)) + status = mgmt_status(PTR_ERR(skb)); + else + status = mgmt_status(skb->data[0]); + } - cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev); - if (!cmd) - return; + bt_dev_dbg(hdev, "status %u", status); mgmt_cp = cmd->param; @@ -7385,7 +7440,7 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status, r192 = NULL; h256 = NULL; r256 = NULL; - } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) { + } else if (!bredr_sc_enabled(hdev)) { struct hci_rp_read_local_oob_data *rp; if (skb->len != sizeof(*rp)) { @@ -7466,6 +7521,9 @@ send_rsp: mgmt_rp, sizeof(*mgmt_rp) + eir_len, HCI_MGMT_OOB_DATA_EVENTS, cmd->sk); done: + if (skb && !IS_ERR(skb)) + kfree_skb(skb); + kfree(mgmt_rp); mgmt_pending_remove(cmd); } @@ -7474,7 +7532,6 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, struct mgmt_cp_read_local_oob_ext_data *cp) { struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev, @@ -7482,14 +7539,9 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, if (!cmd) return -ENOMEM; - hci_req_init(&req, hdev); - - if (bredr_sc_enabled(hdev)) - hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL); - else - hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); + err = hci_cmd_sync_queue(hdev, 
read_local_oob_data_sync, cmd, + read_local_oob_ext_data_complete); - err = hci_req_run_skb(&req, read_local_oob_ext_data_complete); if (err < 0) { mgmt_pending_remove(cmd); return err; @@ -7713,13 +7765,6 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, MGMT_STATUS_REJECTED); - /* Enabling the experimental LL Privay support disables support for - * advertising. - */ - if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, - MGMT_STATUS_NOT_SUPPORTED); - hci_dev_lock(hdev); rp_len = sizeof(*rp) + hdev->adv_instance_cnt; @@ -7876,58 +7921,66 @@ static bool adv_busy(struct hci_dev *hdev) pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev)); } -static void add_advertising_complete(struct hci_dev *hdev, u8 status, - u16 opcode) +static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance, + int err) { - struct mgmt_pending_cmd *cmd; - struct mgmt_cp_add_advertising *cp; - struct mgmt_rp_add_advertising rp; - struct adv_info *adv_instance, *n; - u8 instance; + struct adv_info *adv, *n; - bt_dev_dbg(hdev, "status %u", status); + bt_dev_dbg(hdev, "err %d", err); hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev); - if (!cmd) - cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev); + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + u8 instance; - list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { - if (!adv_instance->pending) + if (!adv->pending) continue; - if (!status) { - adv_instance->pending = false; + if (!err) { + adv->pending = false; continue; } - instance = adv_instance->instance; + instance = adv->instance; if (hdev->cur_adv_instance == instance) cancel_adv_timeout(hdev); hci_remove_adv_instance(hdev, instance); - mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance); + mgmt_advertising_removed(sk, hdev, instance); } - if (!cmd) - goto unlock; + hci_dev_unlock(hdev); +} + +static void add_advertising_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_advertising *cp = cmd->param; + struct mgmt_rp_add_advertising rp; + + memset(&rp, 0, sizeof(rp)); - cp = cmd->param; rp.instance = cp->instance; - if (status) + if (err) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status)); + mgmt_status(err)); else mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status), &rp, sizeof(rp)); + mgmt_status(err), &rp, sizeof(rp)); - mgmt_pending_remove(cmd); + add_adv_complete(hdev, cmd->sk, cp->instance, err); -unlock: - hci_dev_unlock(hdev); + mgmt_pending_free(cmd); +} + +static int add_advertising_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_advertising *cp = cmd->param; + + return hci_schedule_adv_instance_sync(hdev, cp->instance, true); } static int add_advertising(struct sock *sk, struct hci_dev *hdev, @@ -7943,7 +7996,6 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, struct adv_info *next_instance; int err; struct mgmt_pending_cmd *cmd; - struct hci_request req; bt_dev_dbg(hdev, "sock %p", sk); @@ -7952,13 +8004,6 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, status); - /* Enabling the experimental LL Privay support disables support for - * advertising. 
- */ - if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, - MGMT_STATUS_NOT_SUPPORTED); - if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); @@ -8051,25 +8096,19 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, /* We're good to go, update advertising data, parameters, and start * advertising. */ - cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data, + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto unlock; } - hci_req_init(&req, hdev); - - err = __hci_req_schedule_adv_instance(&req, schedule_instance, true); - - if (!err) - err = hci_req_run(&req, add_advertising_complete); + cp->instance = schedule_instance; - if (err < 0) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, - MGMT_STATUS_FAILED); - mgmt_pending_remove(cmd); - } + err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd, + add_advertising_complete); + if (err < 0) + mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); @@ -8077,30 +8116,25 @@ unlock: return err; } -static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status, - u16 opcode) +static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data, + int err) { - struct mgmt_pending_cmd *cmd; - struct mgmt_cp_add_ext_adv_params *cp; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_params *cp = cmd->param; struct mgmt_rp_add_ext_adv_params rp; - struct adv_info *adv_instance; + struct adv_info *adv; u32 flags; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev); - if (!cmd) - goto unlock; - - cp = cmd->param; - adv_instance = hci_find_adv_instance(hdev, cp->instance); - if (!adv_instance) + adv = hci_find_adv_instance(hdev, cp->instance); + if (!adv) goto unlock; rp.instance = cp->instance; - rp.tx_power = adv_instance->tx_power; + rp.tx_power = adv->tx_power; /* While we're at it, inform userspace of the available space for this * advertisement, given the flags that will be used. 
@@ -8109,39 +8143,44 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status, rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); - if (status) { + if (err) { /* If this advertisement was previously advertising and we * failed to update it, we signal that it has been removed and * delete its structure */ - if (!adv_instance->pending) + if (!adv->pending) mgmt_advertising_removed(cmd->sk, hdev, cp->instance); hci_remove_adv_instance(hdev, cp->instance); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status)); - + mgmt_status(err)); } else { mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status), &rp, sizeof(rp)); + mgmt_status(err), &rp, sizeof(rp)); } unlock: if (cmd) - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); hci_dev_unlock(hdev); } +static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_params *cp = cmd->param; + + return hci_setup_ext_adv_instance_sync(hdev, cp->instance); +} + static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_add_ext_adv_params *cp = data; struct mgmt_rp_add_ext_adv_params rp; struct mgmt_pending_cmd *cmd = NULL; - struct adv_info *adv_instance; - struct hci_request req; u32 flags, min_interval, max_interval; u16 timeout, duration; u8 status; @@ -8223,29 +8262,18 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, /* Submit request for advertising params if ext adv available */ if (ext_adv_capable(hdev)) { - hci_req_init(&req, hdev); - adv_instance = hci_find_adv_instance(hdev, cp->instance); - - /* Updating parameters of an active instance will return a - * Command Disallowed error, so we must first disable the - * instance if it is active. 
- */ - if (!adv_instance->pending) - __hci_req_disable_ext_adv_instance(&req, cp->instance); - - __hci_req_setup_ext_adv_instance(&req, cp->instance); - - err = hci_req_run(&req, add_ext_adv_params_complete); - - if (!err) - cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, - hdev, data, data_len); + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev, + data, data_len); if (!cmd) { err = -ENOMEM; hci_remove_adv_instance(hdev, cp->instance); goto unlock; } + err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd, + add_ext_adv_params_complete); + if (err < 0) + mgmt_pending_free(cmd); } else { rp.instance = cp->instance; rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; @@ -8262,6 +8290,49 @@ unlock: return err; } +static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_data *cp = cmd->param; + struct mgmt_rp_add_advertising rp; + + add_adv_complete(hdev, cmd->sk, cp->instance, err); + + memset(&rp, 0, sizeof(rp)); + + rp.instance = cp->instance; + + if (err) + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err)); + else + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err), &rp, sizeof(rp)); + + mgmt_pending_free(cmd); +} + +static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_data *cp = cmd->param; + int err; + + if (ext_adv_capable(hdev)) { + err = hci_update_adv_data_sync(hdev, cp->instance); + if (err) + return err; + + err = hci_update_scan_rsp_data_sync(hdev, cp->instance); + if (err) + return err; + + return hci_enable_ext_advertising_sync(hdev, cp->instance); + } + + return hci_schedule_adv_instance_sync(hdev, cp->instance, true); +} + static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { @@ -8272,7 +8343,6 @@ static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data, struct adv_info *adv_instance; int err = 0; struct mgmt_pending_cmd *cmd; - struct hci_request req; BT_DBG("%s", hdev->name); @@ -8314,78 +8384,52 @@ static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data, cp->data, cp->scan_rsp_len, cp->data + cp->adv_data_len); - /* We're good to go, update advertising data, parameters, and start - * advertising. - */ - - hci_req_init(&req, hdev); - - hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL); - - if (ext_adv_capable(hdev)) { - __hci_req_update_adv_data(&req, cp->instance); - __hci_req_update_scan_rsp_data(&req, cp->instance); - __hci_req_enable_ext_advertising(&req, cp->instance); - - } else { - /* If using software rotation, determine next instance to use */ - - if (hdev->cur_adv_instance == cp->instance) { - /* If the currently advertised instance is being changed - * then cancel the current advertising and schedule the - * next instance. If there is only one instance then the - * overridden advertising data will be visible right - * away - */ - cancel_adv_timeout(hdev); - - next_instance = hci_get_next_instance(hdev, - cp->instance); - if (next_instance) - schedule_instance = next_instance->instance; - } else if (!hdev->adv_instance_timeout) { - /* Immediately advertise the new instance if no other - * instance is currently being advertised. 
- */ - schedule_instance = cp->instance; - } + /* If using software rotation, determine next instance to use */ + if (hdev->cur_adv_instance == cp->instance) { + /* If the currently advertised instance is being changed + * then cancel the current advertising and schedule the + * next instance. If there is only one instance then the + * overridden advertising data will be visible right + * away + */ + cancel_adv_timeout(hdev); - /* If the HCI_ADVERTISING flag is set or there is no instance to - * be advertised then we have no HCI communication to make. - * Simply return. + next_instance = hci_get_next_instance(hdev, cp->instance); + if (next_instance) + schedule_instance = next_instance->instance; + } else if (!hdev->adv_instance_timeout) { + /* Immediately advertise the new instance if no other + * instance is currently being advertised. */ - if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || - !schedule_instance) { - if (adv_instance->pending) { - mgmt_advertising_added(sk, hdev, cp->instance); - adv_instance->pending = false; - } - rp.instance = cp->instance; - err = mgmt_cmd_complete(sk, hdev->id, - MGMT_OP_ADD_EXT_ADV_DATA, - MGMT_STATUS_SUCCESS, &rp, - sizeof(rp)); - goto unlock; - } + schedule_instance = cp->instance; + } - err = __hci_req_schedule_adv_instance(&req, schedule_instance, - true); + /* If the HCI_ADVERTISING flag is set or there is no instance to + * be advertised then we have no HCI communication to make. + * Simply return. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) { + if (adv_instance->pending) { + mgmt_advertising_added(sk, hdev, cp->instance); + adv_instance->pending = false; + } + rp.instance = cp->instance; + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data, + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto clear_new_instance; } - if (!err) - err = hci_req_run(&req, add_advertising_complete); - + err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd, + add_ext_adv_data_complete); if (err < 0) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, - MGMT_STATUS_FAILED); - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); goto clear_new_instance; } @@ -8408,54 +8452,53 @@ unlock: return err; } -static void remove_advertising_complete(struct hci_dev *hdev, u8 status, - u16 opcode) +static void remove_advertising_complete(struct hci_dev *hdev, void *data, + int err) { - struct mgmt_pending_cmd *cmd; - struct mgmt_cp_remove_advertising *cp; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_remove_advertising *cp = cmd->param; struct mgmt_rp_remove_advertising rp; - bt_dev_dbg(hdev, "status %u", status); + bt_dev_dbg(hdev, "err %d", err); - hci_dev_lock(hdev); + memset(&rp, 0, sizeof(rp)); + rp.instance = cp->instance; - /* A failure status here only means that we failed to disable - * advertising. Otherwise, the advertising instance has been removed, - * so report success. 
- */ - cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev); - if (!cmd) - goto unlock; + if (err) + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err)); + else + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); - cp = cmd->param; - rp.instance = cp->instance; + mgmt_pending_free(cmd); +} - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS, - &rp, sizeof(rp)); - mgmt_pending_remove(cmd); +static int remove_advertising_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_remove_advertising *cp = cmd->param; + int err; -unlock: - hci_dev_unlock(hdev); + err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true); + if (err) + return err; + + if (list_empty(&hdev->adv_instances)) + err = hci_disable_advertising_sync(hdev); + + return err; } static int remove_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_remove_advertising *cp = data; - struct mgmt_rp_remove_advertising rp; struct mgmt_pending_cmd *cmd; - struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); - /* Enabling the experimental LL Privay support disables support for - * advertising. - */ - if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, - MGMT_STATUS_NOT_SUPPORTED); - hci_dev_lock(hdev); if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) { @@ -8479,44 +8522,17 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev, goto unlock; } - hci_req_init(&req, hdev); - - /* If we use extended advertising, instance is disabled and removed */ - if (ext_adv_capable(hdev)) { - __hci_req_disable_ext_adv_instance(&req, cp->instance); - __hci_req_remove_ext_adv_instance(&req, cp->instance); - } - - hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true); - - if (list_empty(&hdev->adv_instances)) - __hci_req_disable_advertising(&req); - - /* If no HCI commands have been collected so far or the HCI_ADVERTISING - * flag is set or the device isn't powered then we have no HCI - * communication to make. Simply return. - */ - if (skb_queue_empty(&req.cmd_q) || - !hdev_is_powered(hdev) || - hci_dev_test_flag(hdev, HCI_ADVERTISING)) { - hci_req_purge(&req); - rp.instance = cp->instance; - err = mgmt_cmd_complete(sk, hdev->id, - MGMT_OP_REMOVE_ADVERTISING, - MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); - goto unlock; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data, + cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto unlock; } - err = hci_req_run(&req, remove_advertising_complete); + err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd, + remove_advertising_complete); if (err < 0) - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); @@ -8758,31 +8774,6 @@ void mgmt_index_removed(struct hci_dev *hdev) HCI_MGMT_EXT_INDEX_EVENTS); } -/* This function requires the caller holds hdev->lock */ -static void restart_le_actions(struct hci_dev *hdev) -{ - struct hci_conn_params *p; - - list_for_each_entry(p, &hdev->le_conn_params, list) { - /* Needed for AUTO_OFF case where might not "really" - * have been powered off. 
- */ - list_del_init(&p->action); - - switch (p->auto_connect) { - case HCI_AUTO_CONN_DIRECT: - case HCI_AUTO_CONN_ALWAYS: - list_add(&p->action, &hdev->pend_le_conns); - break; - case HCI_AUTO_CONN_REPORT: - list_add(&p->action, &hdev->pend_le_reports); - break; - default: - break; - } - } -} - void mgmt_power_on(struct hci_dev *hdev, int err) { struct cmd_lookup match = { NULL, hdev }; @@ -8793,7 +8784,7 @@ void mgmt_power_on(struct hci_dev *hdev, int err) if (!err) { restart_le_actions(hdev); - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); } mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); @@ -9349,74 +9340,6 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) sock_put(match.sk); } -static void clear_eir(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_eir cp; - - if (!lmp_ext_inq_capable(hdev)) - return; - - memset(hdev->eir, 0, sizeof(hdev->eir)); - - memset(&cp, 0, sizeof(cp)); - - hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); -} - -void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) -{ - struct cmd_lookup match = { NULL, hdev }; - struct hci_request req; - bool changed = false; - - if (status) { - u8 mgmt_err = mgmt_status(status); - - if (enable && hci_dev_test_and_clear_flag(hdev, - HCI_SSP_ENABLED)) { - hci_dev_clear_flag(hdev, HCI_HS_ENABLED); - new_settings(hdev, NULL); - } - - mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, - &mgmt_err); - return; - } - - if (enable) { - changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); - } else { - changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); - if (!changed) - changed = hci_dev_test_and_clear_flag(hdev, - HCI_HS_ENABLED); - else - hci_dev_clear_flag(hdev, HCI_HS_ENABLED); - } - - mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); - - if (changed) - new_settings(hdev, match.sk); - - if (match.sk) - sock_put(match.sk); - - hci_req_init(&req, hdev); - - if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { - if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) - hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE, - sizeof(enable), &enable); - __hci_req_update_eir(&req); - } else { - clear_eir(&req); - } - - hci_req_run(&req, NULL); -} - static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data) { struct cmd_lookup *match = data; diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c index 0d0a6d77b9e8..83875f2a0604 100644 --- a/net/bluetooth/mgmt_util.c +++ b/net/bluetooth/mgmt_util.c @@ -227,7 +227,7 @@ void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, } } -struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, +struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, struct hci_dev *hdev, void *data, u16 len) { @@ -251,6 +251,19 @@ struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, cmd->sk = sk; sock_hold(sk); + return cmd; +} + +struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, + struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_pending_cmd *cmd; + + cmd = mgmt_pending_new(sk, opcode, hdev, data, len); + if (!cmd) + return NULL; + list_add(&cmd->list, &hdev->mgmt_pending); return cmd; diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h index 6559f189213c..63b965eaaaac 100644 --- a/net/bluetooth/mgmt_util.h +++ b/net/bluetooth/mgmt_util.h @@ -27,6 +27,7 @@ struct mgmt_pending_cmd { void *param; size_t param_len; struct sock *sk; + struct sk_buff *skb; void 
*user_data; int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status); }; @@ -49,5 +50,8 @@ void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, struct hci_dev *hdev, void *data, u16 len); +struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, + struct hci_dev *hdev, + void *data, u16 len); void mgmt_pending_free(struct mgmt_pending_cmd *cmd); void mgmt_pending_remove(struct mgmt_pending_cmd *cmd); diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index 255cffa554ee..1122097e1e49 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -93,7 +93,7 @@ struct msft_data { struct list_head handle_map; __u16 pending_add_handle; __u16 pending_remove_handle; - __u8 reregistering; + __u8 resuming; __u8 suspending; __u8 filter_enabled; }; @@ -156,7 +156,6 @@ failed: return false; } -/* This function requires the caller holds hdev->lock */ static void reregister_monitor(struct hci_dev *hdev, int handle) { struct adv_monitor *monitor; @@ -166,9 +165,9 @@ static void reregister_monitor(struct hci_dev *hdev, int handle) while (1) { monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); if (!monitor) { - /* All monitors have been reregistered */ - msft->reregistering = false; - hci_update_background_scan(hdev); + /* All monitors have been resumed */ + msft->resuming = false; + hci_update_passive_scan(hdev); return; } @@ -185,67 +184,317 @@ static void reregister_monitor(struct hci_dev *hdev, int handle) } } -/* This function requires the caller holds hdev->lock */ -static void remove_monitor_on_suspend(struct hci_dev *hdev, int handle) +/* is_mgmt = true matches the handle exposed to userspace via mgmt. + * is_mgmt = false matches the handle used by the msft controller. 
+ * This function requires the caller holds hdev->lock + */ +static struct msft_monitor_advertisement_handle_data *msft_find_handle_data + (struct hci_dev *hdev, u16 handle, bool is_mgmt) +{ + struct msft_monitor_advertisement_handle_data *entry; + struct msft_data *msft = hdev->msft_data; + + list_for_each_entry(entry, &msft->handle_map, list) { + if (is_mgmt && entry->mgmt_handle == handle) + return entry; + if (!is_mgmt && entry->msft_handle == handle) + return entry; + } + + return NULL; +} + +static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev, + u8 status, u16 opcode, + struct sk_buff *skb) +{ + struct msft_rp_le_monitor_advertisement *rp; + struct adv_monitor *monitor; + struct msft_monitor_advertisement_handle_data *handle_data; + struct msft_data *msft = hdev->msft_data; + + hci_dev_lock(hdev); + + monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle); + if (!monitor) { + bt_dev_err(hdev, "msft add advmon: monitor %u is not found!", + msft->pending_add_handle); + status = HCI_ERROR_UNSPECIFIED; + goto unlock; + } + + if (status) + goto unlock; + + rp = (struct msft_rp_le_monitor_advertisement *)skb->data; + if (skb->len < sizeof(*rp)) { + status = HCI_ERROR_UNSPECIFIED; + goto unlock; + } + + handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL); + if (!handle_data) { + status = HCI_ERROR_UNSPECIFIED; + goto unlock; + } + + handle_data->mgmt_handle = monitor->handle; + handle_data->msft_handle = rp->handle; + INIT_LIST_HEAD(&handle_data->list); + list_add(&handle_data->list, &msft->handle_map); + + monitor->state = ADV_MONITOR_STATE_OFFLOADED; + +unlock: + if (status && monitor) + hci_free_adv_monitor(hdev, monitor); + + hci_dev_unlock(hdev); + + if (!msft->resuming) + hci_add_adv_patterns_monitor_complete(hdev, status); +} + +static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, + u8 status, u16 opcode, + struct sk_buff *skb) { + struct msft_cp_le_cancel_monitor_advertisement *cp; + struct msft_rp_le_cancel_monitor_advertisement *rp; struct adv_monitor *monitor; + struct msft_monitor_advertisement_handle_data *handle_data; struct msft_data *msft = hdev->msft_data; int err; + bool pending; - while (1) { - monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); - if (!monitor) { - /* All monitors have been removed */ - msft->suspending = false; - hci_update_background_scan(hdev); + if (status) + goto done; + + rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data; + if (skb->len < sizeof(*rp)) { + status = HCI_ERROR_UNSPECIFIED; + goto done; + } + + hci_dev_lock(hdev); + + cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); + handle_data = msft_find_handle_data(hdev, cp->handle, false); + + if (handle_data) { + monitor = idr_find(&hdev->adv_monitors_idr, + handle_data->mgmt_handle); + + if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) + monitor->state = ADV_MONITOR_STATE_REGISTERED; + + /* Do not free the monitor if it is being removed due to + * suspend. It will be re-monitored on resume. + */ + if (monitor && !msft->suspending) + hci_free_adv_monitor(hdev, monitor); + + list_del(&handle_data->list); + kfree(handle_data); + } + + /* If remove all monitors is required, we need to continue the process + * here because the earlier it was paused when waiting for the + * response from controller. 
+ */ + if (msft->pending_remove_handle == 0) { + pending = hci_remove_all_adv_monitor(hdev, &err); + if (pending) { + hci_dev_unlock(hdev); return; } - msft->pending_remove_handle = (u16)handle; - err = __msft_remove_monitor(hdev, monitor, handle); + if (err) + status = HCI_ERROR_UNSPECIFIED; + } - /* If success, return and wait for monitor removed callback */ - if (!err) - return; + hci_dev_unlock(hdev); + +done: + if (!msft->suspending) + hci_remove_adv_monitor_complete(hdev, status); +} + +static int msft_remove_monitor_sync(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + struct msft_cp_le_cancel_monitor_advertisement cp; + struct msft_monitor_advertisement_handle_data *handle_data; + struct sk_buff *skb; + u8 status; + + handle_data = msft_find_handle_data(hdev, monitor->handle, true); + + /* If no matched handle, just remove without telling controller */ + if (!handle_data) + return -ENOENT; + + cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT; + cp.handle = handle_data->msft_handle; + + skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + status = skb->data[0]; + skb_pull(skb, 1); + + msft_le_cancel_monitor_advertisement_cb(hdev, status, hdev->msft_opcode, + skb); + + return status; +} + +/* This function requires the caller holds hci_req_sync_lock */ +int msft_suspend_sync(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + struct adv_monitor *monitor; + int handle = 0; + + if (!msft || !msft_monitor_supported(hdev)) + return 0; + + msft->suspending = true; + + while (1) { + monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); + if (!monitor) + break; + + msft_remove_monitor_sync(hdev, monitor); - /* Otherwise free the monitor and keep removing */ - hci_free_adv_monitor(hdev, monitor); handle++; } + + /* All monitors have been removed */ + msft->suspending = false; + + return 0; } -/* This function requires the caller holds hdev->lock */ -void msft_suspend(struct hci_dev *hdev) +static bool msft_monitor_rssi_valid(struct adv_monitor *monitor) { - struct msft_data *msft = hdev->msft_data; + struct adv_rssi_thresholds *r = &monitor->rssi; - if (!msft) - return; + if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || + r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX || + r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || + r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX) + return false; - if (msft_monitor_supported(hdev)) { - msft->suspending = true; - /* Quitely remove all monitors on suspend to avoid waking up - * the system. - */ - remove_monitor_on_suspend(hdev, 0); + /* High_threshold_timeout is not supported, + * once high_threshold is reached, events are immediately reported. 
+ */ + if (r->high_threshold_timeout != 0) + return false; + + if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX) + return false; + + /* Sampling period from 0x00 to 0xFF are all allowed */ + return true; +} + +static bool msft_monitor_pattern_valid(struct adv_monitor *monitor) +{ + return msft_monitor_rssi_valid(monitor); + /* No additional check needed for pattern-based monitor */ +} + +static int msft_add_monitor_sync(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + struct msft_cp_le_monitor_advertisement *cp; + struct msft_le_monitor_advertisement_pattern_data *pattern_data; + struct msft_le_monitor_advertisement_pattern *pattern; + struct adv_pattern *entry; + size_t total_size = sizeof(*cp) + sizeof(*pattern_data); + ptrdiff_t offset = 0; + u8 pattern_count = 0; + struct sk_buff *skb; + u8 status; + + if (!msft_monitor_pattern_valid(monitor)) + return -EINVAL; + + list_for_each_entry(entry, &monitor->patterns, list) { + pattern_count++; + total_size += sizeof(*pattern) + entry->length; } + + cp = kmalloc(total_size, GFP_KERNEL); + if (!cp) + return -ENOMEM; + + cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT; + cp->rssi_high = monitor->rssi.high_threshold; + cp->rssi_low = monitor->rssi.low_threshold; + cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout; + cp->rssi_sampling_period = monitor->rssi.sampling_period; + + cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN; + + pattern_data = (void *)cp->data; + pattern_data->count = pattern_count; + + list_for_each_entry(entry, &monitor->patterns, list) { + pattern = (void *)(pattern_data->data + offset); + /* the length also includes data_type and offset */ + pattern->length = entry->length + 2; + pattern->data_type = entry->ad_type; + pattern->start_byte = entry->offset; + memcpy(pattern->pattern, entry->value, entry->length); + offset += sizeof(*pattern) + entry->length; + } + + skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp, + HCI_CMD_TIMEOUT); + kfree(cp); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + status = skb->data[0]; + skb_pull(skb, 1); + + msft_le_monitor_advertisement_cb(hdev, status, hdev->msft_opcode, skb); + + return status; } -/* This function requires the caller holds hdev->lock */ -void msft_resume(struct hci_dev *hdev) +/* This function requires the caller holds hci_req_sync_lock */ +int msft_resume_sync(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; + struct adv_monitor *monitor; + int handle = 0; - if (!msft) - return; + if (!msft || !msft_monitor_supported(hdev)) + return 0; - if (msft_monitor_supported(hdev)) { - msft->reregistering = true; - /* Monitors are removed on suspend, so we need to add all - * monitors on resume. - */ - reregister_monitor(hdev, 0); + msft->resuming = true; + + while (1) { + monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); + if (!monitor) + break; + + msft_add_monitor_sync(hdev, monitor); + + handle++; } + + /* All monitors have been resumed */ + msft->resuming = false; + + return 0; } void msft_do_open(struct hci_dev *hdev) @@ -275,7 +524,7 @@ void msft_do_open(struct hci_dev *hdev) } if (msft_monitor_supported(hdev)) { - msft->reregistering = true; + msft->resuming = true; msft_set_filter_enable(hdev, true); /* Monitors get removed on power off, so we need to explicitly * tell the controller to re-monitor. @@ -381,151 +630,6 @@ __u64 msft_get_features(struct hci_dev *hdev) return msft ? msft->features : 0; } -/* is_mgmt = true matches the handle exposed to userspace via mgmt. 
- * is_mgmt = false matches the handle used by the msft controller. - * This function requires the caller holds hdev->lock - */ -static struct msft_monitor_advertisement_handle_data *msft_find_handle_data - (struct hci_dev *hdev, u16 handle, bool is_mgmt) -{ - struct msft_monitor_advertisement_handle_data *entry; - struct msft_data *msft = hdev->msft_data; - - list_for_each_entry(entry, &msft->handle_map, list) { - if (is_mgmt && entry->mgmt_handle == handle) - return entry; - if (!is_mgmt && entry->msft_handle == handle) - return entry; - } - - return NULL; -} - -static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev, - u8 status, u16 opcode, - struct sk_buff *skb) -{ - struct msft_rp_le_monitor_advertisement *rp; - struct adv_monitor *monitor; - struct msft_monitor_advertisement_handle_data *handle_data; - struct msft_data *msft = hdev->msft_data; - - hci_dev_lock(hdev); - - monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle); - if (!monitor) { - bt_dev_err(hdev, "msft add advmon: monitor %u is not found!", - msft->pending_add_handle); - status = HCI_ERROR_UNSPECIFIED; - goto unlock; - } - - if (status) - goto unlock; - - rp = (struct msft_rp_le_monitor_advertisement *)skb->data; - if (skb->len < sizeof(*rp)) { - status = HCI_ERROR_UNSPECIFIED; - goto unlock; - } - - handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL); - if (!handle_data) { - status = HCI_ERROR_UNSPECIFIED; - goto unlock; - } - - handle_data->mgmt_handle = monitor->handle; - handle_data->msft_handle = rp->handle; - INIT_LIST_HEAD(&handle_data->list); - list_add(&handle_data->list, &msft->handle_map); - - monitor->state = ADV_MONITOR_STATE_OFFLOADED; - -unlock: - if (status && monitor) - hci_free_adv_monitor(hdev, monitor); - - /* If in restart/reregister sequence, keep registering. */ - if (msft->reregistering) - reregister_monitor(hdev, msft->pending_add_handle + 1); - - hci_dev_unlock(hdev); - - if (!msft->reregistering) - hci_add_adv_patterns_monitor_complete(hdev, status); -} - -static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, - u8 status, u16 opcode, - struct sk_buff *skb) -{ - struct msft_cp_le_cancel_monitor_advertisement *cp; - struct msft_rp_le_cancel_monitor_advertisement *rp; - struct adv_monitor *monitor; - struct msft_monitor_advertisement_handle_data *handle_data; - struct msft_data *msft = hdev->msft_data; - int err; - bool pending; - - if (status) - goto done; - - rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data; - if (skb->len < sizeof(*rp)) { - status = HCI_ERROR_UNSPECIFIED; - goto done; - } - - hci_dev_lock(hdev); - - cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); - handle_data = msft_find_handle_data(hdev, cp->handle, false); - - if (handle_data) { - monitor = idr_find(&hdev->adv_monitors_idr, - handle_data->mgmt_handle); - - if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) - monitor->state = ADV_MONITOR_STATE_REGISTERED; - - /* Do not free the monitor if it is being removed due to - * suspend. It will be re-monitored on resume. - */ - if (monitor && !msft->suspending) - hci_free_adv_monitor(hdev, monitor); - - list_del(&handle_data->list); - kfree(handle_data); - } - - /* If in suspend/remove sequence, keep removing. */ - if (msft->suspending) - remove_monitor_on_suspend(hdev, - msft->pending_remove_handle + 1); - - /* If remove all monitors is required, we need to continue the process - * here because the earlier it was paused when waiting for the - * response from controller. 
- */ - if (msft->pending_remove_handle == 0) { - pending = hci_remove_all_adv_monitor(hdev, &err); - if (pending) { - hci_dev_unlock(hdev); - return; - } - - if (err) - status = HCI_ERROR_UNSPECIFIED; - } - - hci_dev_unlock(hdev); - -done: - if (!msft->suspending) - hci_remove_adv_monitor_complete(hdev, status); -} - static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) @@ -560,35 +664,6 @@ static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static bool msft_monitor_rssi_valid(struct adv_monitor *monitor) -{ - struct adv_rssi_thresholds *r = &monitor->rssi; - - if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || - r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX || - r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || - r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX) - return false; - - /* High_threshold_timeout is not supported, - * once high_threshold is reached, events are immediately reported. - */ - if (r->high_threshold_timeout != 0) - return false; - - if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX) - return false; - - /* Sampling period from 0x00 to 0xFF are all allowed */ - return true; -} - -static bool msft_monitor_pattern_valid(struct adv_monitor *monitor) -{ - return msft_monitor_rssi_valid(monitor); - /* No additional check needed for pattern-based monitor */ -} - /* This function requires the caller holds hdev->lock */ static int __msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) @@ -656,7 +731,7 @@ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) if (!msft) return -EOPNOTSUPP; - if (msft->reregistering || msft->suspending) + if (msft->resuming || msft->suspending) return -EBUSY; return __msft_add_monitor_pattern(hdev, monitor); @@ -700,7 +775,7 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, if (!msft) return -EOPNOTSUPP; - if (msft->reregistering || msft->suspending) + if (msft->resuming || msft->suspending) return -EBUSY; return __msft_remove_monitor(hdev, monitor, handle); diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h index 59c6e081c789..b59b63dc0ea8 100644 --- a/net/bluetooth/msft.h +++ b/net/bluetooth/msft.h @@ -24,8 +24,8 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, u16 handle); void msft_req_add_set_filter_enable(struct hci_request *req, bool enable); int msft_set_filter_enable(struct hci_dev *hdev, bool enable); -void msft_suspend(struct hci_dev *hdev); -void msft_resume(struct hci_dev *hdev); +int msft_suspend_sync(struct hci_dev *hdev); +int msft_resume_sync(struct hci_dev *hdev); bool msft_curve_validity(struct hci_dev *hdev); #else @@ -61,8 +61,15 @@ static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable) return -EOPNOTSUPP; } -static inline void msft_suspend(struct hci_dev *hdev) {} -static inline void msft_resume(struct hci_dev *hdev) {} +static inline int msft_suspend_sync(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int msft_resume_sync(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} static inline bool msft_curve_validity(struct hci_dev *hdev) { diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index c1183fef1f21..3915832a03c2 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -397,10 +397,10 @@ static int find_portno(struct net_bridge *br) if (!inuse) return -ENOMEM; - set_bit(0, inuse); /* zero is reserved */ - list_for_each_entry(p, 
&br->port_list, list) { - set_bit(p->port_no, inuse); - } + __set_bit(0, inuse); /* zero is reserved */ + list_for_each_entry(p, &br->port_list, list) + __set_bit(p->port_no, inuse); + index = find_first_zero_bit(inuse, BR_MAX_PORTS); bitmap_free(inuse); @@ -525,8 +525,8 @@ static void br_set_gso_limits(struct net_bridge *br) gso_max_size = min(gso_max_size, p->dev->gso_max_size); gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs); } - br->dev->gso_max_size = gso_max_size; - br->dev->gso_max_segs = gso_max_segs; + netif_set_gso_max_size(br->dev, gso_max_size); + netif_set_gso_max_segs(br->dev, gso_max_segs); } /* diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index d9a89ddd0331..159590d5c2af 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -36,15 +36,14 @@ static ssize_t store_bridge_parm(struct device *d, struct net_bridge *br = to_bridge(d); struct netlink_ext_ack extack = {0}; unsigned long val; - char *endp; int err; if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN)) return -EPERM; - val = simple_strtoul(buf, &endp, 0); - if (endp == buf) - return -EINVAL; + err = kstrtoul(buf, 0, &val); + if (err != 0) + return err; if (!rtnl_trylock()) return restart_syscall(); diff --git a/net/core/Makefile b/net/core/Makefile index 4268846f2f47..a8e4f737692b 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -11,7 +11,9 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o obj-y += dev.o dev_addr_lists.o dst.o netevent.o \ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \ - fib_notifier.o xdp.o flow_offload.o + fib_notifier.o xdp.o flow_offload.o gro.o + +obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o obj-y += net-sysfs.o obj-$(CONFIG_PAGE_POOL) += page_pool.o diff --git a/net/core/dev.c b/net/core/dev.c index 15ac064b5562..823917de0d2b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -153,16 +153,10 @@ #include "net-sysfs.h" -#define MAX_GRO_SKBS 8 - -/* This should be increased if a protocol with a bigger head is added. */ -#define GRO_MAX_HEAD (MAX_HEADER + 128) static DEFINE_SPINLOCK(ptype_lock); -static DEFINE_SPINLOCK(offload_lock); struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; struct list_head ptype_all __read_mostly; /* Taps */ -static struct list_head offload_base __read_mostly; static int netif_rx_internal(struct sk_buff *skb); static int call_netdevice_notifiers_info(unsigned long val, @@ -604,84 +598,6 @@ void dev_remove_pack(struct packet_type *pt) EXPORT_SYMBOL(dev_remove_pack); -/** - * dev_add_offload - register offload handlers - * @po: protocol offload declaration - * - * Add protocol offload handlers to the networking stack. The passed - * &proto_offload is linked into kernel lists and may not be freed until - * it has been removed from the kernel lists. - * - * This call does not sleep therefore it can not - * guarantee all CPU's that are in middle of receiving packets - * will see the new offload handlers (until the next received packet). 
- */ -void dev_add_offload(struct packet_offload *po) -{ - struct packet_offload *elem; - - spin_lock(&offload_lock); - list_for_each_entry(elem, &offload_base, list) { - if (po->priority < elem->priority) - break; - } - list_add_rcu(&po->list, elem->list.prev); - spin_unlock(&offload_lock); -} -EXPORT_SYMBOL(dev_add_offload); - -/** - * __dev_remove_offload - remove offload handler - * @po: packet offload declaration - * - * Remove a protocol offload handler that was previously added to the - * kernel offload handlers by dev_add_offload(). The passed &offload_type - * is removed from the kernel lists and can be freed or reused once this - * function returns. - * - * The packet type might still be in use by receivers - * and must not be freed until after all the CPU's have gone - * through a quiescent state. - */ -static void __dev_remove_offload(struct packet_offload *po) -{ - struct list_head *head = &offload_base; - struct packet_offload *po1; - - spin_lock(&offload_lock); - - list_for_each_entry(po1, head, list) { - if (po == po1) { - list_del_rcu(&po->list); - goto out; - } - } - - pr_warn("dev_remove_offload: %p not found\n", po); -out: - spin_unlock(&offload_lock); -} - -/** - * dev_remove_offload - remove packet offload handler - * @po: packet offload declaration - * - * Remove a packet offload handler that was previously added to the kernel - * offload handlers by dev_add_offload(). The passed &offload_type is - * removed from the kernel lists and can be freed or reused once this - * function returns. - * - * This call sleeps to guarantee that no CPU is looking at the packet - * type after return. - */ -void dev_remove_offload(struct packet_offload *po) -{ - __dev_remove_offload(po); - - synchronize_net(); -} -EXPORT_SYMBOL(dev_remove_offload); - /******************************************************************************* * * Device Interface Subroutines @@ -1461,6 +1377,7 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) int ret; ASSERT_RTNL(); + dev_addr_check(dev); if (!netif_device_present(dev)) { /* may be detached because parent is runtime-suspended */ @@ -3315,40 +3232,6 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) return __vlan_get_protocol(skb, type, depth); } -/** - * skb_mac_gso_segment - mac layer segmentation handler. - * @skb: buffer to segment - * @features: features for the output path (see dev->features) - */ -struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, - netdev_features_t features) -{ - struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); - struct packet_offload *ptype; - int vlan_depth = skb->mac_len; - __be16 type = skb_network_protocol(skb, &vlan_depth); - - if (unlikely(!type)) - return ERR_PTR(-EINVAL); - - __skb_pull(skb, vlan_depth); - - rcu_read_lock(); - list_for_each_entry_rcu(ptype, &offload_base, list) { - if (ptype->type == type && ptype->callbacks.gso_segment) { - segs = ptype->callbacks.gso_segment(skb, features); - break; - } - } - rcu_read_unlock(); - - __skb_push(skb, skb->data - skb_mac_header(skb)); - - return segs; -} -EXPORT_SYMBOL(skb_mac_gso_segment); - - /* openvswitch calls this on rx path, so we need a different check. 
*/ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) @@ -3513,7 +3396,7 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb, { u16 gso_segs = skb_shinfo(skb)->gso_segs; - if (gso_segs > dev->gso_max_segs) + if (gso_segs > READ_ONCE(dev->gso_max_segs)) return features & ~NETIF_F_GSO_MASK; if (!skb_shinfo(skb)->gso_type) { @@ -4320,8 +4203,6 @@ int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ int dev_rx_weight __read_mostly = 64; int dev_tx_weight __read_mostly = 64; -/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ -int gro_normal_batch __read_mostly = 8; /* Called with irq disabled */ static inline void ____napi_schedule(struct softnet_data *sd, @@ -5664,7 +5545,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb) return ret; } -static void netif_receive_skb_list_internal(struct list_head *head) +void netif_receive_skb_list_internal(struct list_head *head) { struct sk_buff *skb, *next; struct list_head sublist; @@ -5842,550 +5723,6 @@ static void flush_all_backlogs(void) cpus_read_unlock(); } -/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */ -static void gro_normal_list(struct napi_struct *napi) -{ - if (!napi->rx_count) - return; - netif_receive_skb_list_internal(&napi->rx_list); - INIT_LIST_HEAD(&napi->rx_list); - napi->rx_count = 0; -} - -/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, - * pass the whole batch up to the stack. - */ -static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs) -{ - list_add_tail(&skb->list, &napi->rx_list); - napi->rx_count += segs; - if (napi->rx_count >= gro_normal_batch) - gro_normal_list(napi); -} - -static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) -{ - struct packet_offload *ptype; - __be16 type = skb->protocol; - struct list_head *head = &offload_base; - int err = -ENOENT; - - BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); - - if (NAPI_GRO_CB(skb)->count == 1) { - skb_shinfo(skb)->gso_size = 0; - goto out; - } - - rcu_read_lock(); - list_for_each_entry_rcu(ptype, head, list) { - if (ptype->type != type || !ptype->callbacks.gro_complete) - continue; - - err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, - ipv6_gro_complete, inet_gro_complete, - skb, 0); - break; - } - rcu_read_unlock(); - - if (err) { - WARN_ON(&ptype->list == head); - kfree_skb(skb); - return; - } - -out: - gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); -} - -static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, - bool flush_old) -{ - struct list_head *head = &napi->gro_hash[index].list; - struct sk_buff *skb, *p; - - list_for_each_entry_safe_reverse(skb, p, head, list) { - if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) - return; - skb_list_del_init(skb); - napi_gro_complete(napi, skb); - napi->gro_hash[index].count--; - } - - if (!napi->gro_hash[index].count) - __clear_bit(index, &napi->gro_bitmask); -} - -/* napi->gro_hash[].list contains packets ordered by age. - * youngest packets at the head of it. - * Complete skbs in reverse order to reduce latencies. 
- */ -void napi_gro_flush(struct napi_struct *napi, bool flush_old) -{ - unsigned long bitmask = napi->gro_bitmask; - unsigned int i, base = ~0U; - - while ((i = ffs(bitmask)) != 0) { - bitmask >>= i; - base += i; - __napi_gro_flush_chain(napi, base, flush_old); - } -} -EXPORT_SYMBOL(napi_gro_flush); - -static void gro_list_prepare(const struct list_head *head, - const struct sk_buff *skb) -{ - unsigned int maclen = skb->dev->hard_header_len; - u32 hash = skb_get_hash_raw(skb); - struct sk_buff *p; - - list_for_each_entry(p, head, list) { - unsigned long diffs; - - NAPI_GRO_CB(p)->flush = 0; - - if (hash != skb_get_hash_raw(p)) { - NAPI_GRO_CB(p)->same_flow = 0; - continue; - } - - diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; - diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb); - if (skb_vlan_tag_present(p)) - diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb); - diffs |= skb_metadata_differs(p, skb); - if (maclen == ETH_HLEN) - diffs |= compare_ether_header(skb_mac_header(p), - skb_mac_header(skb)); - else if (!diffs) - diffs = memcmp(skb_mac_header(p), - skb_mac_header(skb), - maclen); - - /* in most common scenarions 'slow_gro' is 0 - * otherwise we are already on some slower paths - * either skip all the infrequent tests altogether or - * avoid trying too hard to skip each of them individually - */ - if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) { -#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - struct tc_skb_ext *skb_ext; - struct tc_skb_ext *p_ext; -#endif - - diffs |= p->sk != skb->sk; - diffs |= skb_metadata_dst_cmp(p, skb); - diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb); - -#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - skb_ext = skb_ext_find(skb, TC_SKB_EXT); - p_ext = skb_ext_find(p, TC_SKB_EXT); - - diffs |= (!!p_ext) ^ (!!skb_ext); - if (!diffs && unlikely(skb_ext)) - diffs |= p_ext->chain ^ skb_ext->chain; -#endif - } - - NAPI_GRO_CB(p)->same_flow = !diffs; - } -} - -static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff) -{ - const struct skb_shared_info *pinfo = skb_shinfo(skb); - const skb_frag_t *frag0 = &pinfo->frags[0]; - - NAPI_GRO_CB(skb)->data_offset = 0; - NAPI_GRO_CB(skb)->frag0 = NULL; - NAPI_GRO_CB(skb)->frag0_len = 0; - - if (!skb_headlen(skb) && pinfo->nr_frags && - !PageHighMem(skb_frag_page(frag0)) && - (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) { - NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); - NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, - skb_frag_size(frag0), - skb->end - skb->tail); - } -} - -static void gro_pull_from_frag0(struct sk_buff *skb, int grow) -{ - struct skb_shared_info *pinfo = skb_shinfo(skb); - - BUG_ON(skb->end - skb->tail < grow); - - memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); - - skb->data_len -= grow; - skb->tail += grow; - - skb_frag_off_add(&pinfo->frags[0], grow); - skb_frag_size_sub(&pinfo->frags[0], grow); - - if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { - skb_frag_unref(skb, 0); - memmove(pinfo->frags, pinfo->frags + 1, - --pinfo->nr_frags * sizeof(pinfo->frags[0])); - } -} - -static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) -{ - struct sk_buff *oldest; - - oldest = list_last_entry(head, struct sk_buff, list); - - /* We are called with head length >= MAX_GRO_SKBS, so this is - * impossible. - */ - if (WARN_ON_ONCE(!oldest)) - return; - - /* Do not adjust napi->gro_hash[].count, caller is adding a new - * SKB to the chain. 
- */ - skb_list_del_init(oldest); - napi_gro_complete(napi, oldest); -} - -static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) -{ - u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); - struct gro_list *gro_list = &napi->gro_hash[bucket]; - struct list_head *head = &offload_base; - struct packet_offload *ptype; - __be16 type = skb->protocol; - struct sk_buff *pp = NULL; - enum gro_result ret; - int same_flow; - int grow; - - if (netif_elide_gro(skb->dev)) - goto normal; - - gro_list_prepare(&gro_list->list, skb); - - rcu_read_lock(); - list_for_each_entry_rcu(ptype, head, list) { - if (ptype->type != type || !ptype->callbacks.gro_receive) - continue; - - skb_set_network_header(skb, skb_gro_offset(skb)); - skb_reset_mac_len(skb); - NAPI_GRO_CB(skb)->same_flow = 0; - NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb); - NAPI_GRO_CB(skb)->free = 0; - NAPI_GRO_CB(skb)->encap_mark = 0; - NAPI_GRO_CB(skb)->recursion_counter = 0; - NAPI_GRO_CB(skb)->is_fou = 0; - NAPI_GRO_CB(skb)->is_atomic = 1; - NAPI_GRO_CB(skb)->gro_remcsum_start = 0; - - /* Setup for GRO checksum validation */ - switch (skb->ip_summed) { - case CHECKSUM_COMPLETE: - NAPI_GRO_CB(skb)->csum = skb->csum; - NAPI_GRO_CB(skb)->csum_valid = 1; - NAPI_GRO_CB(skb)->csum_cnt = 0; - break; - case CHECKSUM_UNNECESSARY: - NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; - NAPI_GRO_CB(skb)->csum_valid = 0; - break; - default: - NAPI_GRO_CB(skb)->csum_cnt = 0; - NAPI_GRO_CB(skb)->csum_valid = 0; - } - - pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, - ipv6_gro_receive, inet_gro_receive, - &gro_list->list, skb); - break; - } - rcu_read_unlock(); - - if (&ptype->list == head) - goto normal; - - if (PTR_ERR(pp) == -EINPROGRESS) { - ret = GRO_CONSUMED; - goto ok; - } - - same_flow = NAPI_GRO_CB(skb)->same_flow; - ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; - - if (pp) { - skb_list_del_init(pp); - napi_gro_complete(napi, pp); - gro_list->count--; - } - - if (same_flow) - goto ok; - - if (NAPI_GRO_CB(skb)->flush) - goto normal; - - if (unlikely(gro_list->count >= MAX_GRO_SKBS)) - gro_flush_oldest(napi, &gro_list->list); - else - gro_list->count++; - - NAPI_GRO_CB(skb)->count = 1; - NAPI_GRO_CB(skb)->age = jiffies; - NAPI_GRO_CB(skb)->last = skb; - skb_shinfo(skb)->gso_size = skb_gro_len(skb); - list_add(&skb->list, &gro_list->list); - ret = GRO_HELD; - -pull: - grow = skb_gro_offset(skb) - skb_headlen(skb); - if (grow > 0) - gro_pull_from_frag0(skb, grow); -ok: - if (gro_list->count) { - if (!test_bit(bucket, &napi->gro_bitmask)) - __set_bit(bucket, &napi->gro_bitmask); - } else if (test_bit(bucket, &napi->gro_bitmask)) { - __clear_bit(bucket, &napi->gro_bitmask); - } - - return ret; - -normal: - ret = GRO_NORMAL; - goto pull; -} - -struct packet_offload *gro_find_receive_by_type(__be16 type) -{ - struct list_head *offload_head = &offload_base; - struct packet_offload *ptype; - - list_for_each_entry_rcu(ptype, offload_head, list) { - if (ptype->type != type || !ptype->callbacks.gro_receive) - continue; - return ptype; - } - return NULL; -} -EXPORT_SYMBOL(gro_find_receive_by_type); - -struct packet_offload *gro_find_complete_by_type(__be16 type) -{ - struct list_head *offload_head = &offload_base; - struct packet_offload *ptype; - - list_for_each_entry_rcu(ptype, offload_head, list) { - if (ptype->type != type || !ptype->callbacks.gro_complete) - continue; - return ptype; - } - return NULL; -} -EXPORT_SYMBOL(gro_find_complete_by_type); - -static gro_result_t napi_skb_finish(struct napi_struct *napi, - struct sk_buff *skb, - gro_result_t ret) -{ - switch (ret) { - case GRO_NORMAL: - gro_normal_one(napi, skb, 1); - break; - - case GRO_MERGED_FREE: - if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) - napi_skb_free_stolen_head(skb); - else if (skb->fclone != SKB_FCLONE_UNAVAILABLE) - __kfree_skb(skb); - else - __kfree_skb_defer(skb); - break; - - case GRO_HELD: - case GRO_MERGED: - case GRO_CONSUMED: - break; - } - - return ret; -} - -gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) -{ - gro_result_t ret; - - skb_mark_napi_id(skb, napi); - trace_napi_gro_receive_entry(skb); - - skb_gro_reset_offset(skb, 0); - - ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb)); - trace_napi_gro_receive_exit(ret); - - return ret; -} -EXPORT_SYMBOL(napi_gro_receive); - -static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) -{ - if (unlikely(skb->pfmemalloc)) { - consume_skb(skb); - return; - } - __skb_pull(skb, skb_headlen(skb)); - /* restore the reserve we had after netdev_alloc_skb_ip_align() */ - skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); - __vlan_hwaccel_clear_tag(skb); - skb->dev = napi->dev; - skb->skb_iif = 0; - - /* eth_type_trans() assumes pkt_type is PACKET_HOST */ - skb->pkt_type = PACKET_HOST; - - skb->encapsulation = 0; - skb_shinfo(skb)->gso_type = 0; - skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); - if (unlikely(skb->slow_gro)) { - skb_orphan(skb); - skb_ext_reset(skb); - nf_reset_ct(skb); - skb->slow_gro = 0; - } - - napi->skb = skb; -} - -struct sk_buff *napi_get_frags(struct napi_struct *napi) -{ - struct sk_buff *skb = napi->skb; - - if (!skb) { - skb = napi_alloc_skb(napi, GRO_MAX_HEAD); - if (skb) { - napi->skb = skb; - skb_mark_napi_id(skb, napi); - } - } - return skb; -} -EXPORT_SYMBOL(napi_get_frags); - -static 
gro_result_t napi_frags_finish(struct napi_struct *napi, - struct sk_buff *skb, - gro_result_t ret) -{ - switch (ret) { - case GRO_NORMAL: - case GRO_HELD: - __skb_push(skb, ETH_HLEN); - skb->protocol = eth_type_trans(skb, skb->dev); - if (ret == GRO_NORMAL) - gro_normal_one(napi, skb, 1); - break; - - case GRO_MERGED_FREE: - if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) - napi_skb_free_stolen_head(skb); - else - napi_reuse_skb(napi, skb); - break; - - case GRO_MERGED: - case GRO_CONSUMED: - break; - } - - return ret; -} - -/* Upper GRO stack assumes network header starts at gro_offset=0 - * Drivers could call both napi_gro_frags() and napi_gro_receive() - * We copy ethernet header into skb->data to have a common layout. - */ -static struct sk_buff *napi_frags_skb(struct napi_struct *napi) -{ - struct sk_buff *skb = napi->skb; - const struct ethhdr *eth; - unsigned int hlen = sizeof(*eth); - - napi->skb = NULL; - - skb_reset_mac_header(skb); - skb_gro_reset_offset(skb, hlen); - - if (unlikely(skb_gro_header_hard(skb, hlen))) { - eth = skb_gro_header_slow(skb, hlen, 0); - if (unlikely(!eth)) { - net_warn_ratelimited("%s: dropping impossible skb from %s\n", - __func__, napi->dev->name); - napi_reuse_skb(napi, skb); - return NULL; - } - } else { - eth = (const struct ethhdr *)skb->data; - gro_pull_from_frag0(skb, hlen); - NAPI_GRO_CB(skb)->frag0 += hlen; - NAPI_GRO_CB(skb)->frag0_len -= hlen; - } - __skb_pull(skb, hlen); - - /* - * This works because the only protocols we care about don't require - * special handling. - * We'll fix it up properly in napi_frags_finish() - */ - skb->protocol = eth->h_proto; - - return skb; -} - -gro_result_t napi_gro_frags(struct napi_struct *napi) -{ - gro_result_t ret; - struct sk_buff *skb = napi_frags_skb(napi); - - trace_napi_gro_frags_entry(skb); - - ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); - trace_napi_gro_frags_exit(ret); - - return ret; -} -EXPORT_SYMBOL(napi_gro_frags); - -/* Compute the checksum from gro_offset and return the folded value - * after adding in any pseudo checksum. - */ -__sum16 __skb_gro_checksum_complete(struct sk_buff *skb) -{ - __wsum wsum; - __sum16 sum; - - wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); - - /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ - sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); - /* See comments in __skb_checksum_complete(). */ - if (likely(!sum)) { - if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && - !skb->csum_complete_sw) - netdev_rx_csum_fault(skb->dev, skb); - } - - NAPI_GRO_CB(skb)->csum = wsum; - NAPI_GRO_CB(skb)->csum_valid = 1; - - return sum; -} -EXPORT_SYMBOL(__skb_gro_checksum_complete); - static void net_rps_send_ipi(struct softnet_data *remsd) { #ifdef CONFIG_RPS @@ -9221,35 +8558,17 @@ bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) EXPORT_SYMBOL(netdev_port_same_parent_id); /** - * dev_change_proto_down - update protocol port state information + * dev_change_proto_down - set carrier according to proto_down. + * * @dev: device * @proto_down: new value - * - * This info can be used by switch drivers to set the phys state of the - * port. 
*/ int dev_change_proto_down(struct net_device *dev, bool proto_down) { - const struct net_device_ops *ops = dev->netdev_ops; - - if (!ops->ndo_change_proto_down) + if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) return -EOPNOTSUPP; if (!netif_device_present(dev)) return -ENODEV; - return ops->ndo_change_proto_down(dev, proto_down); -} -EXPORT_SYMBOL(dev_change_proto_down); - -/** - * dev_change_proto_down_generic - generic implementation for - * ndo_change_proto_down that sets carrier according to - * proto_down. - * - * @dev: device - * @proto_down: new value - */ -int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) -{ if (proto_down) netif_carrier_off(dev); else @@ -9257,7 +8576,7 @@ int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) dev->proto_down = proto_down; return 0; } -EXPORT_SYMBOL(dev_change_proto_down_generic); +EXPORT_SYMBOL(dev_change_proto_down); /** * dev_change_proto_down_reason - proto down reason @@ -11640,8 +10959,6 @@ static int __init net_dev_init(void) for (i = 0; i < PTYPE_HASH_SIZE; i++) INIT_LIST_HEAD(&ptype_base[i]); - INIT_LIST_HEAD(&offload_base); - if (register_pernet_subsys(&netdev_net_ops)) goto out; diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index f0cb38344126..bead38ca50bd 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -16,6 +16,35 @@ * General list handling functions */ +static int __hw_addr_insert(struct netdev_hw_addr_list *list, + struct netdev_hw_addr *new, int addr_len) +{ + struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL; + struct netdev_hw_addr *ha; + + while (*ins_point) { + int diff; + + ha = rb_entry(*ins_point, struct netdev_hw_addr, node); + diff = memcmp(new->addr, ha->addr, addr_len); + if (diff == 0) + diff = memcmp(&new->type, &ha->type, sizeof(new->type)); + + parent = *ins_point; + if (diff < 0) + ins_point = &parent->rb_left; + else if (diff > 0) + ins_point = &parent->rb_right; + else + return -EEXIST; + } + + rb_link_node_rcu(&new->node, parent, ins_point); + rb_insert_color(&new->node, &list->tree); + + return 0; +} + static struct netdev_hw_addr* __hw_addr_create(const unsigned char *addr, int addr_len, unsigned char addr_type, bool global, bool sync) @@ -50,11 +79,6 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, if (addr_len > MAX_ADDR_LEN) return -EINVAL; - ha = list_first_entry(&list->list, struct netdev_hw_addr, list); - if (ha && !memcmp(addr, ha->addr, addr_len) && - (!addr_type || addr_type == ha->type)) - goto found_it; - while (*ins_point) { int diff; @@ -69,7 +93,6 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, } else if (diff > 0) { ins_point = &parent->rb_right; } else { -found_it: if (exclusive) return -EEXIST; if (global) { @@ -94,16 +117,8 @@ found_it: if (!ha) return -ENOMEM; - /* The first address in dev->dev_addrs is pointed to by dev->dev_addr - * and mutated freely by device drivers and netdev ops, so if we insert - * it into the tree we'll end up with an invalid rbtree. 
- */ - if (list->count > 0) { - rb_link_node(&ha->node, parent, ins_point); - rb_insert_color(&ha->node, &list->tree); - } else { - RB_CLEAR_NODE(&ha->node); - } + rb_link_node(&ha->node, parent, ins_point); + rb_insert_color(&ha->node, &list->tree); list_add_tail_rcu(&ha->list, &list->list); list->count++; @@ -138,8 +153,7 @@ static int __hw_addr_del_entry(struct netdev_hw_addr_list *list, if (--ha->refcount) return 0; - if (!RB_EMPTY_NODE(&ha->node)) - rb_erase(&ha->node, &list->tree); + rb_erase(&ha->node, &list->tree); list_del_rcu(&ha->list); kfree_rcu(ha, rcu_head); @@ -151,18 +165,8 @@ static struct netdev_hw_addr *__hw_addr_lookup(struct netdev_hw_addr_list *list, const unsigned char *addr, int addr_len, unsigned char addr_type) { - struct netdev_hw_addr *ha; struct rb_node *node; - /* The first address isn't inserted into the tree because in the dev->dev_addrs - * list it's the address pointed to by dev->dev_addr which is freely mutated - * in place, so we need to check it separately. - */ - ha = list_first_entry(&list->list, struct netdev_hw_addr, list); - if (ha && !memcmp(addr, ha->addr, addr_len) && - (!addr_type || addr_type == ha->type)) - return ha; - node = list->tree.rb_node; while (node) { @@ -498,6 +502,21 @@ EXPORT_SYMBOL(__hw_addr_init); * Device addresses handling functions */ +/* Check that netdev->dev_addr is not written to directly as this would + * break the rbtree layout. All changes should go thru dev_addr_set() and co. + * Remove this check in mid-2024. + */ +void dev_addr_check(struct net_device *dev) +{ + if (!memcmp(dev->dev_addr, dev->dev_addr_shadow, MAX_ADDR_LEN)) + return; + + netdev_warn(dev, "Current addr: %*ph\n", MAX_ADDR_LEN, dev->dev_addr); + netdev_warn(dev, "Expected addr: %*ph\n", + MAX_ADDR_LEN, dev->dev_addr_shadow); + netdev_WARN(dev, "Incorrect netdev->dev_addr\n"); +} + /** * dev_addr_flush - Flush device address list * @dev: device @@ -509,11 +528,11 @@ EXPORT_SYMBOL(__hw_addr_init); void dev_addr_flush(struct net_device *dev) { /* rtnl_mutex must be held here */ + dev_addr_check(dev); __hw_addr_flush(&dev->dev_addrs); dev->dev_addr = NULL; } -EXPORT_SYMBOL(dev_addr_flush); /** * dev_addr_init - Init device address list @@ -547,7 +566,21 @@ int dev_addr_init(struct net_device *dev) } return err; } -EXPORT_SYMBOL(dev_addr_init); + +void dev_addr_mod(struct net_device *dev, unsigned int offset, + const void *addr, size_t len) +{ + struct netdev_hw_addr *ha; + + dev_addr_check(dev); + + ha = container_of(dev->dev_addr, struct netdev_hw_addr, addr[0]); + rb_erase(&ha->node, &dev->dev_addrs.tree); + memcpy(&ha->addr[offset], addr, len); + memcpy(&dev->dev_addr_shadow[offset], addr, len); + WARN_ON(__hw_addr_insert(&dev->dev_addrs, ha, dev->addr_len)); +} +EXPORT_SYMBOL(dev_addr_mod); /** * dev_addr_add - Add a device address diff --git a/net/core/dev_addr_lists_test.c b/net/core/dev_addr_lists_test.c new file mode 100644 index 000000000000..049cfbc58aa9 --- /dev/null +++ b/net/core/dev_addr_lists_test.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <kunit/test.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> + +static const struct net_device_ops dummy_netdev_ops = { +}; + +struct dev_addr_test_priv { + u32 addr_seen; +}; + +static int dev_addr_test_sync(struct net_device *netdev, const unsigned char *a) +{ + struct dev_addr_test_priv *datp = netdev_priv(netdev); + + if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN)) + datp->addr_seen |= 1 << a[0]; + return 0; +} + 
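+/* A sketch of how callbacks shaped like dev_addr_test_sync() and
+ * dev_addr_test_unsync() above are typically consumed (the foo_* names
+ * are hypothetical, for illustration only): a driver reconciles one of
+ * its address lists against the hardware with __hw_addr_sync_dev(),
+ * roughly:
+ *
+ *	static void foo_sync_addr_list(struct net_device *dev)
+ *	{
+ *		__hw_addr_sync_dev(&dev->uc, dev, foo_sync, foo_unsync);
+ *	}
+ *
+ * This test drives the same helper directly on netdev->dev_addrs. Each
+ * test address is ETH_ALEN bytes all equal to a small value i, and the
+ * callbacks record it as bit i of addr_seen, so the cases below can
+ * assert exactly which addresses the fake "hardware" saw.
+ */
+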
+static int dev_addr_test_unsync(struct net_device *netdev, + const unsigned char *a) +{ + struct dev_addr_test_priv *datp = netdev_priv(netdev); + + if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN)) + datp->addr_seen &= ~(1 << a[0]); + return 0; +} + +static int dev_addr_test_init(struct kunit *test) +{ + struct dev_addr_test_priv *datp; + struct net_device *netdev; + int err; + + netdev = alloc_etherdev(sizeof(*datp)); + KUNIT_ASSERT_TRUE(test, !!netdev); + + test->priv = netdev; + netdev->netdev_ops = &dummy_netdev_ops; + + err = register_netdev(netdev); + if (err) { + free_netdev(netdev); + KUNIT_FAIL(test, "Can't register netdev %d", err); + } + + rtnl_lock(); + return 0; +} + +static void dev_addr_test_exit(struct kunit *test) +{ + struct net_device *netdev = test->priv; + + rtnl_unlock(); + unregister_netdev(netdev); + free_netdev(netdev); +} + +static void dev_addr_test_basic(struct kunit *test) +{ + struct net_device *netdev = test->priv; + u8 addr[ETH_ALEN]; + + KUNIT_EXPECT_TRUE(test, !!netdev->dev_addr); + + memset(addr, 2, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + KUNIT_EXPECT_EQ(test, 0, memcmp(netdev->dev_addr, addr, sizeof(addr))); + + memset(addr, 3, sizeof(addr)); + dev_addr_set(netdev, addr); + KUNIT_EXPECT_EQ(test, 0, memcmp(netdev->dev_addr, addr, sizeof(addr))); +} + +static void dev_addr_test_sync_one(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct dev_addr_test_priv *datp; + u8 addr[ETH_ALEN]; + + datp = netdev_priv(netdev); + + memset(addr, 1, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 2, datp->addr_seen); + + memset(addr, 2, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + + datp->addr_seen = 0; + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + /* It's not going to sync anything because the main address is + * considered synced and we overwrite in place. 
+ */ + KUNIT_EXPECT_EQ(test, 0, datp->addr_seen); +} + +static void dev_addr_test_add_del(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct dev_addr_test_priv *datp; + u8 addr[ETH_ALEN]; + int i; + + datp = netdev_priv(netdev); + + for (i = 1; i < 4; i++) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + } + /* Add 3 again */ + KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 0xf, datp->addr_seen); + + KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 0xf, datp->addr_seen); + + for (i = 1; i < 4; i++) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + } + + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 1, datp->addr_seen); +} + +static void dev_addr_test_del_main(struct kunit *test) +{ + struct net_device *netdev = test->priv; + u8 addr[ETH_ALEN]; + + memset(addr, 1, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + + KUNIT_EXPECT_EQ(test, -ENOENT, dev_addr_del(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + KUNIT_EXPECT_EQ(test, -ENOENT, dev_addr_del(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); +} + +static void dev_addr_test_add_set(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct dev_addr_test_priv *datp; + u8 addr[ETH_ALEN]; + int i; + + datp = netdev_priv(netdev); + + /* There is no external API like dev_addr_add_excl(), + * so shuffle the tree a little bit and exploit aliasing. 
+ */ + for (i = 1; i < 16; i++) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + } + + memset(addr, i, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr, + NETDEV_HW_ADDR_T_LAN)); + memset(addr, 0, sizeof(addr)); + eth_hw_addr_set(netdev, addr); + + __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 0xffff, datp->addr_seen); +} + +static void dev_addr_test_add_excl(struct kunit *test) +{ + struct net_device *netdev = test->priv; + u8 addr[ETH_ALEN]; + int i; + + for (i = 0; i < 10; i++) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_add_excl(netdev, addr)); + } + KUNIT_EXPECT_EQ(test, -EEXIST, dev_uc_add_excl(netdev, addr)); + + for (i = 0; i < 10; i += 2) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_del(netdev, addr)); + } + for (i = 1; i < 10; i += 2) { + memset(addr, i, sizeof(addr)); + KUNIT_EXPECT_EQ(test, -EEXIST, dev_uc_add_excl(netdev, addr)); + } +} + +static struct kunit_case dev_addr_test_cases[] = { + KUNIT_CASE(dev_addr_test_basic), + KUNIT_CASE(dev_addr_test_sync_one), + KUNIT_CASE(dev_addr_test_add_del), + KUNIT_CASE(dev_addr_test_del_main), + KUNIT_CASE(dev_addr_test_add_set), + KUNIT_CASE(dev_addr_test_add_excl), + {} +}; + +static struct kunit_suite dev_addr_test_suite = { + .name = "dev-addr-list-test", + .test_cases = dev_addr_test_cases, + .init = dev_addr_test_init, + .exit = dev_addr_test_exit, +}; +kunit_test_suite(dev_addr_test_suite); + +MODULE_LICENSE("GPL"); diff --git a/net/core/devlink.c b/net/core/devlink.c index 5ad72dbfcd07..d144a62cbf73 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4432,6 +4432,11 @@ static const struct devlink_param devlink_param_generic[] = { .name = DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE, }, + { + .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP, + .name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME, + .type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE, + }, }; static int devlink_param_generic_verify(const struct devlink_param *param) diff --git a/net/core/filter.c b/net/core/filter.c index 6102f093d59a..8271624a19aa 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -301,7 +301,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, break; case SKF_AD_PKTTYPE: - *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET()); + *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX); #ifdef __BIG_ENDIAN_BITFIELD *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5); @@ -323,7 +323,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, offsetof(struct sk_buff, vlan_tci)); break; case SKF_AD_VLAN_TAG_PRESENT: - *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET()); + *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET); if (PKT_VLAN_PRESENT_BIT) *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT); if (PKT_VLAN_PRESENT_BIT < 7) @@ -8029,7 +8029,7 @@ static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, * (Fast-path, otherwise approximation that we might be * a clone, do the rest in helper.) 
*/ - *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET()); + *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); @@ -8617,7 +8617,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, pkt_type): *target_size = 1; *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, - PKT_TYPE_OFFSET()); + PKT_TYPE_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX); #ifdef __BIG_ENDIAN_BITFIELD *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5); @@ -8642,7 +8642,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, vlan_present): *target_size = 1; *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, - PKT_VLAN_PRESENT_OFFSET()); + PKT_VLAN_PRESENT_OFFSET); if (PKT_VLAN_PRESENT_BIT) *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT); if (PKT_VLAN_PRESENT_BIT < 7) @@ -10543,6 +10543,7 @@ static bool sk_lookup_is_valid_access(int off, int size, case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): case bpf_ctx_range(struct bpf_sk_lookup, remote_port): case bpf_ctx_range(struct bpf_sk_lookup, local_port): + case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex): bpf_ctx_record_field_size(info, sizeof(__u32)); return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); @@ -10632,6 +10633,12 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, bpf_target_off(struct bpf_sk_lookup_kern, dport, 2, target_size)); break; + + case offsetof(struct bpf_sk_lookup, ingress_ifindex): + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + ingress_ifindex, 4, target_size)); + break; } return insn - insn_buf; @@ -10656,14 +10663,10 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); } -#ifdef CONFIG_DEBUG_INFO_BTF -BTF_ID_LIST_GLOBAL(btf_sock_ids) +BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE) #define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type) BTF_SOCK_TYPE_xxx #undef BTF_SOCK_TYPE -#else -u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; -#endif BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) { diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 3255f57f5131..257976cb55ce 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -1460,7 +1460,7 @@ out_bad: } EXPORT_SYMBOL(__skb_flow_dissect); -static siphash_key_t hashrnd __read_mostly; +static siphash_aligned_key_t hashrnd; static __always_inline void __flow_hash_secret_init(void) { net_get_random_once(&hashrnd, sizeof(hashrnd)); diff --git a/net/core/gro.c b/net/core/gro.c new file mode 100644 index 000000000000..8ec8b44596da --- /dev/null +++ b/net/core/gro.c @@ -0,0 +1,766 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include <net/gro.h> +#include <net/dst_metadata.h> +#include <net/busy_poll.h> +#include <trace/events/net.h> + +#define MAX_GRO_SKBS 8 + +/* This should be increased if a protocol with a bigger head is added. 
*/ +#define GRO_MAX_HEAD (MAX_HEADER + 128) + +static DEFINE_SPINLOCK(offload_lock); +static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base); +/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ +int gro_normal_batch __read_mostly = 8; + +/** + * dev_add_offload - register offload handlers + * @po: protocol offload declaration + * + * Add protocol offload handlers to the networking stack. The passed + * &proto_offload is linked into kernel lists and may not be freed until + * it has been removed from the kernel lists. + * + * This call does not sleep therefore it can not + * guarantee all CPU's that are in middle of receiving packets + * will see the new offload handlers (until the next received packet). + */ +void dev_add_offload(struct packet_offload *po) +{ + struct packet_offload *elem; + + spin_lock(&offload_lock); + list_for_each_entry(elem, &offload_base, list) { + if (po->priority < elem->priority) + break; + } + list_add_rcu(&po->list, elem->list.prev); + spin_unlock(&offload_lock); +} +EXPORT_SYMBOL(dev_add_offload); + +/** + * __dev_remove_offload - remove offload handler + * @po: packet offload declaration + * + * Remove a protocol offload handler that was previously added to the + * kernel offload handlers by dev_add_offload(). The passed &offload_type + * is removed from the kernel lists and can be freed or reused once this + * function returns. + * + * The packet type might still be in use by receivers + * and must not be freed until after all the CPU's have gone + * through a quiescent state. + */ +static void __dev_remove_offload(struct packet_offload *po) +{ + struct list_head *head = &offload_base; + struct packet_offload *po1; + + spin_lock(&offload_lock); + + list_for_each_entry(po1, head, list) { + if (po == po1) { + list_del_rcu(&po->list); + goto out; + } + } + + pr_warn("dev_remove_offload: %p not found\n", po); +out: + spin_unlock(&offload_lock); +} + +/** + * dev_remove_offload - remove packet offload handler + * @po: packet offload declaration + * + * Remove a packet offload handler that was previously added to the kernel + * offload handlers by dev_add_offload(). The passed &offload_type is + * removed from the kernel lists and can be freed or reused once this + * function returns. + * + * This call sleeps to guarantee that no CPU is looking at the packet + * type after return. + */ +void dev_remove_offload(struct packet_offload *po) +{ + __dev_remove_offload(po); + + synchronize_net(); +} +EXPORT_SYMBOL(dev_remove_offload); + +/** + * skb_mac_gso_segment - mac layer segmentation handler. 
+ * @skb: buffer to segment + * @features: features for the output path (see dev->features) + */ +struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); + struct packet_offload *ptype; + int vlan_depth = skb->mac_len; + __be16 type = skb_network_protocol(skb, &vlan_depth); + + if (unlikely(!type)) + return ERR_PTR(-EINVAL); + + __skb_pull(skb, vlan_depth); + + rcu_read_lock(); + list_for_each_entry_rcu(ptype, &offload_base, list) { + if (ptype->type == type && ptype->callbacks.gso_segment) { + segs = ptype->callbacks.gso_segment(skb, features); + break; + } + } + rcu_read_unlock(); + + __skb_push(skb, skb->data - skb_mac_header(skb)); + + return segs; +} +EXPORT_SYMBOL(skb_mac_gso_segment); + +int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) +{ + struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); + unsigned int offset = skb_gro_offset(skb); + unsigned int headlen = skb_headlen(skb); + unsigned int len = skb_gro_len(skb); + unsigned int delta_truesize; + unsigned int new_truesize; + struct sk_buff *lp; + + if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) + return -E2BIG; + + lp = NAPI_GRO_CB(p)->last; + pinfo = skb_shinfo(lp); + + if (headlen <= offset) { + skb_frag_t *frag; + skb_frag_t *frag2; + int i = skbinfo->nr_frags; + int nr_frags = pinfo->nr_frags + i; + + if (nr_frags > MAX_SKB_FRAGS) + goto merge; + + offset -= headlen; + pinfo->nr_frags = nr_frags; + skbinfo->nr_frags = 0; + + frag = pinfo->frags + nr_frags; + frag2 = skbinfo->frags + i; + do { + *--frag = *--frag2; + } while (--i); + + skb_frag_off_add(frag, offset); + skb_frag_size_sub(frag, offset); + + /* all fragments truesize : remove (head size + sk_buff) */ + new_truesize = SKB_TRUESIZE(skb_end_offset(skb)); + delta_truesize = skb->truesize - new_truesize; + + skb->truesize = new_truesize; + skb->len -= skb->data_len; + skb->data_len = 0; + + NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; + goto done; + } else if (skb->head_frag) { + int nr_frags = pinfo->nr_frags; + skb_frag_t *frag = pinfo->frags + nr_frags; + struct page *page = virt_to_head_page(skb->head); + unsigned int first_size = headlen - offset; + unsigned int first_offset; + + if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) + goto merge; + + first_offset = skb->data - + (unsigned char *)page_address(page) + + offset; + + pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; + + __skb_frag_set_page(frag, page); + skb_frag_off_set(frag, first_offset); + skb_frag_size_set(frag, first_size); + + memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); + /* We dont need to clear skbinfo->nr_frags here */ + + new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff)); + delta_truesize = skb->truesize - new_truesize; + skb->truesize = new_truesize; + NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; + goto done; + } + +merge: + /* sk owenrship - if any - completely transferred to the aggregated packet */ + skb->destructor = NULL; + delta_truesize = skb->truesize; + if (offset > headlen) { + unsigned int eat = offset - headlen; + + skb_frag_off_add(&skbinfo->frags[0], eat); + skb_frag_size_sub(&skbinfo->frags[0], eat); + skb->data_len -= eat; + skb->len -= eat; + offset = headlen; + } + + __skb_pull(skb, offset); + + if (NAPI_GRO_CB(p)->last == p) + skb_shinfo(p)->frag_list = skb; + else + NAPI_GRO_CB(p)->last->next = skb; + NAPI_GRO_CB(p)->last = skb; + __skb_header_release(skb); + lp = p; + +done: + NAPI_GRO_CB(p)->count++; + p->data_len += 
len; + p->truesize += delta_truesize; + p->len += len; + if (lp != p) { + lp->data_len += len; + lp->truesize += delta_truesize; + lp->len += len; + } + NAPI_GRO_CB(skb)->same_flow = 1; + return 0; +} + + +static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) +{ + struct packet_offload *ptype; + __be16 type = skb->protocol; + struct list_head *head = &offload_base; + int err = -ENOENT; + + BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); + + if (NAPI_GRO_CB(skb)->count == 1) { + skb_shinfo(skb)->gso_size = 0; + goto out; + } + + rcu_read_lock(); + list_for_each_entry_rcu(ptype, head, list) { + if (ptype->type != type || !ptype->callbacks.gro_complete) + continue; + + err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, + ipv6_gro_complete, inet_gro_complete, + skb, 0); + break; + } + rcu_read_unlock(); + + if (err) { + WARN_ON(&ptype->list == head); + kfree_skb(skb); + return; + } + +out: + gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); +} + +static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, + bool flush_old) +{ + struct list_head *head = &napi->gro_hash[index].list; + struct sk_buff *skb, *p; + + list_for_each_entry_safe_reverse(skb, p, head, list) { + if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) + return; + skb_list_del_init(skb); + napi_gro_complete(napi, skb); + napi->gro_hash[index].count--; + } + + if (!napi->gro_hash[index].count) + __clear_bit(index, &napi->gro_bitmask); +} + +/* napi->gro_hash[].list contains packets ordered by age. + * youngest packets at the head of it. + * Complete skbs in reverse order to reduce latencies. + */ +void napi_gro_flush(struct napi_struct *napi, bool flush_old) +{ + unsigned long bitmask = napi->gro_bitmask; + unsigned int i, base = ~0U; + + while ((i = ffs(bitmask)) != 0) { + bitmask >>= i; + base += i; + __napi_gro_flush_chain(napi, base, flush_old); + } +} +EXPORT_SYMBOL(napi_gro_flush); + +static void gro_list_prepare(const struct list_head *head, + const struct sk_buff *skb) +{ + unsigned int maclen = skb->dev->hard_header_len; + u32 hash = skb_get_hash_raw(skb); + struct sk_buff *p; + + list_for_each_entry(p, head, list) { + unsigned long diffs; + + NAPI_GRO_CB(p)->flush = 0; + + if (hash != skb_get_hash_raw(p)) { + NAPI_GRO_CB(p)->same_flow = 0; + continue; + } + + diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; + diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb); + if (skb_vlan_tag_present(p)) + diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb); + diffs |= skb_metadata_differs(p, skb); + if (maclen == ETH_HLEN) + diffs |= compare_ether_header(skb_mac_header(p), + skb_mac_header(skb)); + else if (!diffs) + diffs = memcmp(skb_mac_header(p), + skb_mac_header(skb), + maclen); + + /* in most common scenarions 'slow_gro' is 0 + * otherwise we are already on some slower paths + * either skip all the infrequent tests altogether or + * avoid trying too hard to skip each of them individually + */ + if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) { +#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + struct tc_skb_ext *skb_ext; + struct tc_skb_ext *p_ext; +#endif + + diffs |= p->sk != skb->sk; + diffs |= skb_metadata_dst_cmp(p, skb); + diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb); + +#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + skb_ext = skb_ext_find(skb, TC_SKB_EXT); + p_ext = skb_ext_find(p, TC_SKB_EXT); + + diffs |= (!!p_ext) ^ (!!skb_ext); + if (!diffs && unlikely(skb_ext)) + diffs |= 
p_ext->chain ^ skb_ext->chain; +#endif + } + + NAPI_GRO_CB(p)->same_flow = !diffs; + } +} + +static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff) +{ + const struct skb_shared_info *pinfo = skb_shinfo(skb); + const skb_frag_t *frag0 = &pinfo->frags[0]; + + NAPI_GRO_CB(skb)->data_offset = 0; + NAPI_GRO_CB(skb)->frag0 = NULL; + NAPI_GRO_CB(skb)->frag0_len = 0; + + if (!skb_headlen(skb) && pinfo->nr_frags && + !PageHighMem(skb_frag_page(frag0)) && + (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) { + NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); + NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, + skb_frag_size(frag0), + skb->end - skb->tail); + } +} + +static void gro_pull_from_frag0(struct sk_buff *skb, int grow) +{ + struct skb_shared_info *pinfo = skb_shinfo(skb); + + BUG_ON(skb->end - skb->tail < grow); + + memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); + + skb->data_len -= grow; + skb->tail += grow; + + skb_frag_off_add(&pinfo->frags[0], grow); + skb_frag_size_sub(&pinfo->frags[0], grow); + + if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { + skb_frag_unref(skb, 0); + memmove(pinfo->frags, pinfo->frags + 1, + --pinfo->nr_frags * sizeof(pinfo->frags[0])); + } +} + +static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) +{ + struct sk_buff *oldest; + + oldest = list_last_entry(head, struct sk_buff, list); + + /* We are called with head length >= MAX_GRO_SKBS, so this is + * impossible. + */ + if (WARN_ON_ONCE(!oldest)) + return; + + /* Do not adjust napi->gro_hash[].count, caller is adding a new + * SKB to the chain. + */ + skb_list_del_init(oldest); + napi_gro_complete(napi, oldest); +} + +static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) +{ + u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); + struct gro_list *gro_list = &napi->gro_hash[bucket]; + struct list_head *head = &offload_base; + struct packet_offload *ptype; + __be16 type = skb->protocol; + struct sk_buff *pp = NULL; + enum gro_result ret; + int same_flow; + int grow; + + if (netif_elide_gro(skb->dev)) + goto normal; + + gro_list_prepare(&gro_list->list, skb); + + rcu_read_lock(); + list_for_each_entry_rcu(ptype, head, list) { + if (ptype->type != type || !ptype->callbacks.gro_receive) + continue; + + skb_set_network_header(skb, skb_gro_offset(skb)); + skb_reset_mac_len(skb); + NAPI_GRO_CB(skb)->same_flow = 0; + NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb); + NAPI_GRO_CB(skb)->free = 0; + NAPI_GRO_CB(skb)->encap_mark = 0; + NAPI_GRO_CB(skb)->recursion_counter = 0; + NAPI_GRO_CB(skb)->is_fou = 0; + NAPI_GRO_CB(skb)->is_atomic = 1; + NAPI_GRO_CB(skb)->gro_remcsum_start = 0; + + /* Setup for GRO checksum validation */ + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + NAPI_GRO_CB(skb)->csum = skb->csum; + NAPI_GRO_CB(skb)->csum_valid = 1; + NAPI_GRO_CB(skb)->csum_cnt = 0; + break; + case CHECKSUM_UNNECESSARY: + NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; + NAPI_GRO_CB(skb)->csum_valid = 0; + break; + default: + NAPI_GRO_CB(skb)->csum_cnt = 0; + NAPI_GRO_CB(skb)->csum_valid = 0; + } + + pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, + ipv6_gro_receive, inet_gro_receive, + &gro_list->list, skb); + break; + } + rcu_read_unlock(); + + if (&ptype->list == head) + goto normal; + + if (PTR_ERR(pp) == -EINPROGRESS) { + ret = GRO_CONSUMED; + goto ok; + } + + same_flow = NAPI_GRO_CB(skb)->same_flow; + ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; + + if (pp) { + skb_list_del_init(pp); + napi_gro_complete(napi, pp); + gro_list->count--; + } + + if (same_flow) + goto ok; + + if (NAPI_GRO_CB(skb)->flush) + goto normal; + + if (unlikely(gro_list->count >= MAX_GRO_SKBS)) + gro_flush_oldest(napi, &gro_list->list); + else + gro_list->count++; + + NAPI_GRO_CB(skb)->count = 1; + NAPI_GRO_CB(skb)->age = jiffies; + NAPI_GRO_CB(skb)->last = skb; + skb_shinfo(skb)->gso_size = skb_gro_len(skb); + list_add(&skb->list, &gro_list->list); + ret = GRO_HELD; + +pull: + grow = skb_gro_offset(skb) - skb_headlen(skb); + if (grow > 0) + gro_pull_from_frag0(skb, grow); +ok: + if (gro_list->count) { + if (!test_bit(bucket, &napi->gro_bitmask)) + __set_bit(bucket, &napi->gro_bitmask); + } else if (test_bit(bucket, &napi->gro_bitmask)) { + __clear_bit(bucket, &napi->gro_bitmask); + } + + return ret; + +normal: + ret = GRO_NORMAL; + goto pull; +} + +struct packet_offload *gro_find_receive_by_type(__be16 type) +{ + struct list_head *offload_head = &offload_base; + struct packet_offload *ptype; + + list_for_each_entry_rcu(ptype, offload_head, list) { + if (ptype->type != type || !ptype->callbacks.gro_receive) + continue; + return ptype; + } + return NULL; +} +EXPORT_SYMBOL(gro_find_receive_by_type); + +struct packet_offload *gro_find_complete_by_type(__be16 type) +{ + struct list_head *offload_head = &offload_base; + struct packet_offload *ptype; + + list_for_each_entry_rcu(ptype, offload_head, list) { + if (ptype->type != type || !ptype->callbacks.gro_complete) + continue; + return ptype; + } + return NULL; +} +EXPORT_SYMBOL(gro_find_complete_by_type); + +static gro_result_t napi_skb_finish(struct napi_struct *napi, + struct sk_buff *skb, + gro_result_t ret) +{ + switch (ret) { + case GRO_NORMAL: + gro_normal_one(napi, skb, 1); + break; + + case GRO_MERGED_FREE: + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) + napi_skb_free_stolen_head(skb); + else if (skb->fclone != SKB_FCLONE_UNAVAILABLE) + __kfree_skb(skb); + else + __kfree_skb_defer(skb); + break; + + case GRO_HELD: + case GRO_MERGED: + case GRO_CONSUMED: + break; + } + + return ret; +} + +gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) +{ + gro_result_t ret; + + skb_mark_napi_id(skb, napi); + trace_napi_gro_receive_entry(skb); + + skb_gro_reset_offset(skb, 0); + + ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb)); + trace_napi_gro_receive_exit(ret); + + return ret; +} +EXPORT_SYMBOL(napi_gro_receive); + +static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) +{ + if (unlikely(skb->pfmemalloc)) { + consume_skb(skb); + return; + } + __skb_pull(skb, skb_headlen(skb)); + /* restore the reserve we had after netdev_alloc_skb_ip_align() */ + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); + __vlan_hwaccel_clear_tag(skb); + skb->dev = napi->dev; + skb->skb_iif = 0; + + /* eth_type_trans() assumes pkt_type is PACKET_HOST */ + skb->pkt_type = PACKET_HOST; + + skb->encapsulation = 0; + skb_shinfo(skb)->gso_type = 0; + skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); + if (unlikely(skb->slow_gro)) { + skb_orphan(skb); + skb_ext_reset(skb); + nf_reset_ct(skb); + skb->slow_gro = 0; + } + + napi->skb = skb; +} + +struct sk_buff *napi_get_frags(struct napi_struct *napi) +{ + struct sk_buff *skb = napi->skb; + + if (!skb) { + skb = napi_alloc_skb(napi, GRO_MAX_HEAD); + if (skb) { + napi->skb = skb; + skb_mark_napi_id(skb, napi); + } + } + return skb; +} +EXPORT_SYMBOL(napi_get_frags); + +static 
gro_result_t napi_frags_finish(struct napi_struct *napi, + struct sk_buff *skb, + gro_result_t ret) +{ + switch (ret) { + case GRO_NORMAL: + case GRO_HELD: + __skb_push(skb, ETH_HLEN); + skb->protocol = eth_type_trans(skb, skb->dev); + if (ret == GRO_NORMAL) + gro_normal_one(napi, skb, 1); + break; + + case GRO_MERGED_FREE: + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) + napi_skb_free_stolen_head(skb); + else + napi_reuse_skb(napi, skb); + break; + + case GRO_MERGED: + case GRO_CONSUMED: + break; + } + + return ret; +} + +/* Upper GRO stack assumes network header starts at gro_offset=0 + * Drivers could call both napi_gro_frags() and napi_gro_receive() + * We copy ethernet header into skb->data to have a common layout. + */ +static struct sk_buff *napi_frags_skb(struct napi_struct *napi) +{ + struct sk_buff *skb = napi->skb; + const struct ethhdr *eth; + unsigned int hlen = sizeof(*eth); + + napi->skb = NULL; + + skb_reset_mac_header(skb); + skb_gro_reset_offset(skb, hlen); + + if (unlikely(skb_gro_header_hard(skb, hlen))) { + eth = skb_gro_header_slow(skb, hlen, 0); + if (unlikely(!eth)) { + net_warn_ratelimited("%s: dropping impossible skb from %s\n", + __func__, napi->dev->name); + napi_reuse_skb(napi, skb); + return NULL; + } + } else { + eth = (const struct ethhdr *)skb->data; + gro_pull_from_frag0(skb, hlen); + NAPI_GRO_CB(skb)->frag0 += hlen; + NAPI_GRO_CB(skb)->frag0_len -= hlen; + } + __skb_pull(skb, hlen); + + /* + * This works because the only protocols we care about don't require + * special handling. + * We'll fix it up properly in napi_frags_finish() + */ + skb->protocol = eth->h_proto; + + return skb; +} + +gro_result_t napi_gro_frags(struct napi_struct *napi) +{ + gro_result_t ret; + struct sk_buff *skb = napi_frags_skb(napi); + + trace_napi_gro_frags_entry(skb); + + ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); + trace_napi_gro_frags_exit(ret); + + return ret; +} +EXPORT_SYMBOL(napi_gro_frags); + +/* Compute the checksum from gro_offset and return the folded value + * after adding in any pseudo checksum. + */ +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb) +{ + __wsum wsum; + __sum16 sum; + + wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); + + /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ + sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); + /* See comments in __skb_checksum_complete(). */ + if (likely(!sum)) { + if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && + !skb->csum_complete_sw) + netdev_rx_csum_fault(skb->dev, skb); + } + + NAPI_GRO_CB(skb)->csum = wsum; + NAPI_GRO_CB(skb)->csum_valid = 1; + + return sum; +} +EXPORT_SYMBOL(__skb_gro_checksum_complete); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 9c01c642cf9e..affe34d71d31 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -488,14 +488,6 @@ static ssize_t proto_down_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - struct net_device *netdev = to_net_dev(dev); - - /* The check is also done in change_proto_down; this helps returning - * early without hitting the trylock/restart in netdev_store. 
- */ - if (!netdev->netdev_ops->ndo_change_proto_down) - return -EOPNOTSUPP; - return netdev_store(dev, attr, buf, len, change_proto_down); } NETDEVICE_SHOW_RW(proto_down, fmt_dec); @@ -1201,11 +1193,7 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = { static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) { - unsigned long trans_timeout; - - spin_lock_irq(&queue->_xmit_lock); - trans_timeout = queue->trans_timeout; - spin_unlock_irq(&queue->_xmit_lock); + unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout); return sprintf(buf, fmt_ulong, trans_timeout); } @@ -1452,7 +1440,7 @@ static ssize_t xps_queue_show(struct net_device *dev, unsigned int index, for (i = map->len; i--;) { if (map->queues[i] == index) { - set_bit(j, mask); + __set_bit(j, mask); break; } } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2af8aeeadadf..6f25c0a8aebe 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2539,13 +2539,12 @@ static int do_set_proto_down(struct net_device *dev, struct netlink_ext_ack *extack) { struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; - const struct net_device_ops *ops = dev->netdev_ops; unsigned long mask = 0; u32 value; bool proto_down; int err; - if (!ops->ndo_change_proto_down) { + if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) { NL_SET_ERR_MSG(extack, "Protodown not supported by device"); return -EOPNOTSUPP; } @@ -2768,7 +2767,7 @@ static int do_setlink(const struct sk_buff *skb, } if (dev->gso_max_segs ^ max_segs) { - dev->gso_max_segs = max_segs; + netif_set_gso_max_segs(dev, max_segs); status |= DO_SETLINK_MODIFIED; } } @@ -3222,7 +3221,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, if (tb[IFLA_GSO_MAX_SIZE]) netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE])); if (tb[IFLA_GSO_MAX_SEGS]) - dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); + netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS])); return dev; } diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index b5bc680d4755..9b8443774449 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -19,8 +19,8 @@ #include <linux/in6.h> #include <net/tcp.h> -static siphash_key_t net_secret __read_mostly; -static siphash_key_t ts_secret __read_mostly; +static siphash_aligned_key_t net_secret; +static siphash_aligned_key_t ts_secret; static __always_inline void net_secret_init(void) { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ba2f38246f07..a33247fdb8f5 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -992,12 +992,10 @@ void napi_consume_skb(struct sk_buff *skb, int budget) } EXPORT_SYMBOL(napi_consume_skb); -/* Make sure a field is enclosed inside headers_start/headers_end section */ +/* Make sure a field is contained by headers group */ #define CHECK_SKB_FIELD(field) \ - BUILD_BUG_ON(offsetof(struct sk_buff, field) < \ - offsetof(struct sk_buff, headers_start)); \ - BUILD_BUG_ON(offsetof(struct sk_buff, field) > \ - offsetof(struct sk_buff, headers_end)); \ + BUILD_BUG_ON(offsetof(struct sk_buff, field) != \ + offsetof(struct sk_buff, headers.field)); \ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { @@ -1009,14 +1007,12 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) __skb_ext_copy(new, old); __nf_copy(new, old, false); - /* Note : this field could be in headers_start/headers_end section + /* Note : this field could be in the headers group. 
* It is not yet because we do not want to have a 16 bit hole */ new->queue_mapping = old->queue_mapping; - memcpy(&new->headers_start, &old->headers_start, - offsetof(struct sk_buff, headers_end) - - offsetof(struct sk_buff, headers_start)); + memcpy(&new->headers, &old->headers, sizeof(new->headers)); CHECK_SKB_FIELD(protocol); CHECK_SKB_FIELD(csum); CHECK_SKB_FIELD(hash); @@ -3919,32 +3915,6 @@ err_linearize: } EXPORT_SYMBOL_GPL(skb_segment_list); -int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) -{ - if (unlikely(p->len + skb->len >= 65536)) - return -E2BIG; - - if (NAPI_GRO_CB(p)->last == p) - skb_shinfo(p)->frag_list = skb; - else - NAPI_GRO_CB(p)->last->next = skb; - - skb_pull(skb, skb_gro_offset(skb)); - - NAPI_GRO_CB(p)->last = skb; - NAPI_GRO_CB(p)->count++; - p->data_len += skb->len; - - /* sk owenrship - if any - completely transferred to the aggregated packet */ - skb->destructor = NULL; - p->truesize += skb->truesize; - p->len += skb->len; - - NAPI_GRO_CB(skb)->same_flow = 1; - - return 0; -} - /** * skb_segment - Perform protocol segmentation on skb. * @head_skb: buffer to segment @@ -4297,122 +4267,6 @@ err: } EXPORT_SYMBOL_GPL(skb_segment); -int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) -{ - struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); - unsigned int offset = skb_gro_offset(skb); - unsigned int headlen = skb_headlen(skb); - unsigned int len = skb_gro_len(skb); - unsigned int delta_truesize; - unsigned int new_truesize; - struct sk_buff *lp; - - if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) - return -E2BIG; - - lp = NAPI_GRO_CB(p)->last; - pinfo = skb_shinfo(lp); - - if (headlen <= offset) { - skb_frag_t *frag; - skb_frag_t *frag2; - int i = skbinfo->nr_frags; - int nr_frags = pinfo->nr_frags + i; - - if (nr_frags > MAX_SKB_FRAGS) - goto merge; - - offset -= headlen; - pinfo->nr_frags = nr_frags; - skbinfo->nr_frags = 0; - - frag = pinfo->frags + nr_frags; - frag2 = skbinfo->frags + i; - do { - *--frag = *--frag2; - } while (--i); - - skb_frag_off_add(frag, offset); - skb_frag_size_sub(frag, offset); - - /* all fragments truesize : remove (head size + sk_buff) */ - new_truesize = SKB_TRUESIZE(skb_end_offset(skb)); - delta_truesize = skb->truesize - new_truesize; - - skb->truesize = new_truesize; - skb->len -= skb->data_len; - skb->data_len = 0; - - NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; - goto done; - } else if (skb->head_frag) { - int nr_frags = pinfo->nr_frags; - skb_frag_t *frag = pinfo->frags + nr_frags; - struct page *page = virt_to_head_page(skb->head); - unsigned int first_size = headlen - offset; - unsigned int first_offset; - - if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) - goto merge; - - first_offset = skb->data - - (unsigned char *)page_address(page) + - offset; - - pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; - - __skb_frag_set_page(frag, page); - skb_frag_off_set(frag, first_offset); - skb_frag_size_set(frag, first_size); - - memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); - /* We dont need to clear skbinfo->nr_frags here */ - - new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff)); - delta_truesize = skb->truesize - new_truesize; - skb->truesize = new_truesize; - NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; - goto done; - } - -merge: - /* sk owenrship - if any - completely transferred to the aggregated packet */ - skb->destructor = NULL; - delta_truesize = skb->truesize; - if (offset > headlen) { - unsigned int eat = offset - headlen; - - 
skb_frag_off_add(&skbinfo->frags[0], eat); - skb_frag_size_sub(&skbinfo->frags[0], eat); - skb->data_len -= eat; - skb->len -= eat; - offset = headlen; - } - - __skb_pull(skb, offset); - - if (NAPI_GRO_CB(p)->last == p) - skb_shinfo(p)->frag_list = skb; - else - NAPI_GRO_CB(p)->last->next = skb; - NAPI_GRO_CB(p)->last = skb; - __skb_header_release(skb); - lp = p; - -done: - NAPI_GRO_CB(p)->count++; - p->data_len += len; - p->truesize += delta_truesize; - p->len += len; - if (lp != p) { - lp->data_len += len; - lp->truesize += delta_truesize; - lp->len += len; - } - NAPI_GRO_CB(skb)->same_flow = 1; - return 0; -} - #ifdef CONFIG_SKB_EXTENSIONS #define SKB_EXT_ALIGN_VALUE 8 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) @@ -4849,8 +4703,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb, serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { serr->ee.ee_data = skb_shinfo(skb)->tskey; - if (sk->sk_protocol == IPPROTO_TCP && - sk->sk_type == SOCK_STREAM) + if (sk_is_tcp(sk)) serr->ee.ee_data -= sk->sk_tskey; } @@ -4919,8 +4772,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, if (tsonly) { #ifdef CONFIG_INET if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && - sk->sk_protocol == IPPROTO_TCP && - sk->sk_type == SOCK_STREAM) { + sk_is_tcp(sk)) { skb = tcp_get_timestamping_opt_stats(sk, orig_skb, ack_skb); opt_stats = true; diff --git a/net/core/sock.c b/net/core/sock.c index 41e91d0f7061..4a499d255f40 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -144,8 +144,6 @@ static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); -static void sock_inuse_add(struct net *net, int val); - /** * sk_ns_capable - General socket capability test * @sk: Socket to use a capability on or through @@ -327,7 +325,10 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); noreclaim_flag = memalloc_noreclaim_save(); - ret = sk->sk_backlog_rcv(sk, skb); + ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv, + tcp_v6_do_rcv, + tcp_v4_do_rcv, + sk, skb); memalloc_noreclaim_restore(noreclaim_flag); return ret; @@ -872,8 +873,7 @@ int sock_set_timestamping(struct sock *sk, int optname, if (val & SOF_TIMESTAMPING_OPT_ID && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { - if (sk->sk_protocol == IPPROTO_TCP && - sk->sk_type == SOCK_STREAM) { + if (sk_is_tcp(sk)) { if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) return -EINVAL; @@ -1135,6 +1135,7 @@ set_sndbuf: case SO_PRIORITY: if ((val >= 0 && val <= 6) || + ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) || ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) sk->sk_priority = val; else @@ -1280,7 +1281,8 @@ set_sndbuf: clear_bit(SOCK_PASSSEC, &sock->flags); break; case SO_MARK: - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && + !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { ret = -EPERM; break; } @@ -1370,8 +1372,7 @@ set_sndbuf: case SO_ZEROCOPY: if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) { - if (!((sk->sk_type == SOCK_STREAM && - sk->sk_protocol == IPPROTO_TCP) || + if (!(sk_is_tcp(sk) || (sk->sk_type == SOCK_DGRAM && sk->sk_protocol == IPPROTO_UDP))) ret = -ENOTSUPP; @@ -2246,17 +2247,22 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) u32 max_segs = 1; sk_dst_set(sk, dst); - sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps; + sk->sk_route_caps = dst->dev->features; + if (sk_is_tcp(sk)) 
+ sk->sk_route_caps |= NETIF_F_GSO; if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; - sk->sk_route_caps &= ~sk->sk_route_nocaps; + if (unlikely(sk->sk_gso_disabled)) + sk->sk_route_caps &= ~NETIF_F_GSO_MASK; if (sk_can_gso(sk)) { if (dst->header_len && !xfrm_dst_offload_ok(dst)) { sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; - sk->sk_gso_max_size = dst->dev->gso_max_size; - max_segs = max_t(u32, dst->dev->gso_max_segs, 1); + /* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */ + sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size); + /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */ + max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1); } } sk->sk_gso_max_segs = max_segs; @@ -3532,19 +3538,8 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem) } #ifdef CONFIG_PROC_FS -#define PROTO_INUSE_NR 64 /* should be enough for the first time */ -struct prot_inuse { - int val[PROTO_INUSE_NR]; -}; - static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); -void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) -{ - __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val); -} -EXPORT_SYMBOL_GPL(sock_prot_inuse_add); - int sock_prot_inuse_get(struct net *net, struct proto *prot) { int cpu, idx = prot->inuse_idx; @@ -3557,17 +3552,12 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot) } EXPORT_SYMBOL_GPL(sock_prot_inuse_get); -static void sock_inuse_add(struct net *net, int val) -{ - this_cpu_add(*net->core.sock_inuse, val); -} - int sock_inuse_get(struct net *net) { int cpu, res = 0; for_each_possible_cpu(cpu) - res += *per_cpu_ptr(net->core.sock_inuse, cpu); + res += per_cpu_ptr(net->core.prot_inuse, cpu)->all; return res; } @@ -3579,22 +3569,12 @@ static int __net_init sock_inuse_init_net(struct net *net) net->core.prot_inuse = alloc_percpu(struct prot_inuse); if (net->core.prot_inuse == NULL) return -ENOMEM; - - net->core.sock_inuse = alloc_percpu(int); - if (net->core.sock_inuse == NULL) - goto out; - return 0; - -out: - free_percpu(net->core.prot_inuse); - return -ENOMEM; } static void __net_exit sock_inuse_exit_net(struct net *net) { free_percpu(net->core.prot_inuse); - free_percpu(net->core.sock_inuse); } static struct pernet_operations net_inuse_ops = { @@ -3640,9 +3620,6 @@ static inline void release_proto_idx(struct proto *prot) { } -static void sock_inuse_add(struct net *net, int val) -{ -} #endif static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot) diff --git a/net/dccp/proto.c b/net/dccp/proto.c index fc44dadc778b..a976b4d29892 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -238,17 +238,6 @@ void dccp_destroy_sock(struct sock *sk) EXPORT_SYMBOL_GPL(dccp_destroy_sock); -static inline int dccp_listen_start(struct sock *sk, int backlog) -{ - struct dccp_sock *dp = dccp_sk(sk); - - dp->dccps_role = DCCP_ROLE_LISTEN; - /* do not start to listen if feature negotiation setup fails */ - if (dccp_feat_finalise_settings(dp)) - return -EPROTO; - return inet_csk_listen_start(sk, backlog); -} - static inline int dccp_need_reset(int state) { return state != DCCP_CLOSED && state != DCCP_LISTEN && @@ -931,11 +920,17 @@ int inet_dccp_listen(struct socket *sock, int backlog) * we can only allow the backlog to be adjusted. 
*/ if (old_state != DCCP_LISTEN) { - /* - * FIXME: here it probably should be sk->sk_prot->listen_start - * see tcp_listen_start - */ - err = dccp_listen_start(sk, backlog); + struct dccp_sock *dp = dccp_sk(sk); + + dp->dccps_role = DCCP_ROLE_LISTEN; + + /* do not start to listen if feature negotiation setup fails */ + if (dccp_feat_finalise_settings(dp)) { + err = -EPROTO; + goto out; + } + + err = inet_csk_listen_start(sk); if (err) goto out; } diff --git a/net/dccp/trace.h b/net/dccp/trace.h index 5062421beee9..5a43b3508c7f 100644 --- a/net/dccp/trace.h +++ b/net/dccp/trace.h @@ -60,9 +60,7 @@ TRACE_EVENT(dccp_probe, __entry->tx_t_ipi = hc->tx_t_ipi; } else { __entry->tx_s = 0; - memset(&__entry->tx_rtt, 0, (void *)&__entry->tx_t_ipi - - (void *)&__entry->tx_rtt + - sizeof(__entry->tx_t_ipi)); + memset_startat(__entry, 0, tx_rtt); } ), diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index c7d9e08107cb..ebcc812735a4 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -436,11 +436,10 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb) type = eh->h_proto; - rcu_read_lock(); ptype = gro_find_receive_by_type(type); if (ptype == NULL) { flush = 1; - goto out_unlock; + goto out; } skb_gro_pull(skb, sizeof(*eh)); @@ -450,8 +449,6 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb) ipv6_gro_receive, inet_gro_receive, head, skb); -out_unlock: - rcu_read_unlock(); out: skb_gro_flush_final(skb, pp, flush); @@ -469,14 +466,12 @@ int eth_gro_complete(struct sk_buff *skb, int nhoff) if (skb->encapsulation) skb_set_inner_mac_header(skb, nhoff); - rcu_read_lock(); ptype = gro_find_complete_by_type(type); if (ptype != NULL) err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, ipv6_gro_complete, inet_gro_complete, skb, nhoff + sizeof(*eh)); - rcu_read_unlock(); return err; } EXPORT_SYMBOL(eth_gro_complete); diff --git a/net/ethtool/common.c b/net/ethtool/common.c index c63e0739dc6a..0c5210015911 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -89,6 +89,7 @@ tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = { [ETHTOOL_RX_COPYBREAK] = "rx-copybreak", [ETHTOOL_TX_COPYBREAK] = "tx-copybreak", [ETHTOOL_PFC_PREVENTION_TOUT] = "pfc-prevention-tout", + [ETHTOOL_TX_COPYBREAK_BUF_SIZE] = "tx-copybreak-buf-size", }; const char diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 20bcf86970ff..06d3866c69b4 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1743,11 +1743,13 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) { struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM }; + struct kernel_ethtool_ringparam kernel_ringparam = {}; if (!dev->ethtool_ops->get_ringparam) return -EOPNOTSUPP; - dev->ethtool_ops->get_ringparam(dev, &ringparam); + dev->ethtool_ops->get_ringparam(dev, &ringparam, + &kernel_ringparam, NULL); if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) return -EFAULT; @@ -1757,6 +1759,7 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) { struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM }; + struct kernel_ethtool_ringparam kernel_ringparam; int ret; if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam) @@ -1765,7 +1768,7 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) if 
(copy_from_user(&ringparam, useraddr, sizeof(ringparam))) return -EFAULT; - dev->ethtool_ops->get_ringparam(dev, &max); + dev->ethtool_ops->get_ringparam(dev, &max, &kernel_ringparam, NULL); /* ensure new ring parameters are within the maximums */ if (ringparam.rx_pending > max.rx_max_pending || @@ -1774,7 +1777,8 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) ringparam.tx_pending > max.tx_max_pending) return -EINVAL; - ret = dev->ethtool_ops->set_ringparam(dev, &ringparam); + ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, + &kernel_ringparam, NULL); if (!ret) ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL); return ret; @@ -2396,6 +2400,7 @@ static int ethtool_tunable_valid(const struct ethtool_tunable *tuna) switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: case ETHTOOL_TX_COPYBREAK: + case ETHTOOL_TX_COPYBREAK_BUF_SIZE: if (tuna->len != sizeof(u32) || tuna->type_id != ETHTOOL_TUNABLE_U32) return -EINVAL; diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 836ee7157848..490598e5eedd 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -356,7 +356,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1]; extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1]; extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1]; -extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX + 1]; +extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_BUF_LEN + 1]; extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1]; extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1]; extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1]; diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index 4e097812a967..450b8866373d 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -10,6 +10,7 @@ struct rings_req_info { struct rings_reply_data { struct ethnl_reply_data base; struct ethtool_ringparam ringparam; + struct kernel_ethtool_ringparam kernel_ringparam; }; #define RINGS_REPDATA(__reply_base) \ @@ -25,6 +26,7 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base, struct genl_info *info) { struct rings_reply_data *data = RINGS_REPDATA(reply_base); + struct netlink_ext_ack *extack = info ? 
info->extack : NULL; struct net_device *dev = reply_base->dev; int ret; @@ -33,7 +35,8 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base, ret = ethnl_ops_begin(dev); if (ret < 0) return ret; - dev->ethtool_ops->get_ringparam(dev, &data->ringparam); + dev->ethtool_ops->get_ringparam(dev, &data->ringparam, + &data->kernel_ringparam, extack); ethnl_ops_complete(dev); return 0; @@ -49,7 +52,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base, nla_total_size(sizeof(u32)) + /* _RINGS_RX */ nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */ nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */ - nla_total_size(sizeof(u32)); /* _RINGS_TX */ + nla_total_size(sizeof(u32)) + /* _RINGS_TX */ + nla_total_size(sizeof(u32)); /* _RINGS_RX_BUF_LEN */ } static int rings_fill_reply(struct sk_buff *skb, @@ -57,6 +61,7 @@ static int rings_fill_reply(struct sk_buff *skb, const struct ethnl_reply_data *reply_base) { const struct rings_reply_data *data = RINGS_REPDATA(reply_base); + const struct kernel_ethtool_ringparam *kernel_ringparam = &data->kernel_ringparam; const struct ethtool_ringparam *ringparam = &data->ringparam; if ((ringparam->rx_max_pending && @@ -78,7 +83,10 @@ static int rings_fill_reply(struct sk_buff *skb, (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX, ringparam->tx_max_pending) || nla_put_u32(skb, ETHTOOL_A_RINGS_TX, - ringparam->tx_pending)))) + ringparam->tx_pending))) || + (kernel_ringparam->rx_buf_len && + (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, + kernel_ringparam->rx_buf_len)))) return -EMSGSIZE; return 0; @@ -105,10 +113,12 @@ const struct nla_policy ethnl_rings_set_policy[] = { [ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_U32 }, [ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 }, [ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 }, + [ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1), }; int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) { + struct kernel_ethtool_ringparam kernel_ringparam = {}; struct ethtool_ringparam ringparam = {}; struct ethnl_req_info req_info = {}; struct nlattr **tb = info->attrs; @@ -134,7 +144,7 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) ret = ethnl_ops_begin(dev); if (ret < 0) goto out_rtnl; - ops->get_ringparam(dev, &ringparam); + ops->get_ringparam(dev, &ringparam, &kernel_ringparam, info->extack); ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod); ethnl_update_u32(&ringparam.rx_mini_pending, @@ -142,6 +152,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) ethnl_update_u32(&ringparam.rx_jumbo_pending, tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod); ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod); + ethnl_update_u32(&kernel_ringparam.rx_buf_len, + tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod); ret = 0; if (!mod) goto out_ops; @@ -164,7 +176,17 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) goto out_ops; } - ret = dev->ethtool_ops->set_ringparam(dev, &ringparam); + if (kernel_ringparam.rx_buf_len != 0 && + !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) { + ret = -EOPNOTSUPP; + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_RX_BUF_LEN], + "setting rx buf len not supported"); + goto out_ops; + } + + ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, + &kernel_ringparam, info->extack); if (ret < 0) goto out_ops; ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL); diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c index ec07f5765e03..a20e0a24ff61 100644 --- a/net/ethtool/stats.c +++ 
b/net/ethtool/stats.c @@ -14,10 +14,12 @@ struct stats_req_info { struct stats_reply_data { struct ethnl_reply_data base; - struct ethtool_eth_phy_stats phy_stats; - struct ethtool_eth_mac_stats mac_stats; - struct ethtool_eth_ctrl_stats ctrl_stats; - struct ethtool_rmon_stats rmon_stats; + struct_group(stats, + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + ); const struct ethtool_rmon_hist_range *rmon_ranges; }; @@ -117,10 +119,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base, /* Mark all stats as unset (see ETHTOOL_STAT_NOT_SET) to prevent them * from being reported to user space in case driver did not set them. */ - memset(&data->phy_stats, 0xff, sizeof(data->phy_stats)); - memset(&data->mac_stats, 0xff, sizeof(data->mac_stats)); - memset(&data->ctrl_stats, 0xff, sizeof(data->ctrl_stats)); - memset(&data->rmon_stats, 0xff, sizeof(data->rmon_stats)); + memset(&data->stats, 0xff, sizeof(data->stats)); if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) && dev->ethtool_ops->get_eth_phy_stats) diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 7bb9ef35c570..3b2366a88c3c 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -174,8 +174,8 @@ static int raw_hash(struct sock *sk) { write_lock_bh(&raw_lock); sk_add_node(sk, &raw_head); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&raw_lock); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return 0; } @@ -453,8 +453,8 @@ static int dgram_hash(struct sock *sk) { write_lock_bh(&dgram_lock); sk_add_node(sk, &dgram_head); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&dgram_lock); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return 0; } diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 0189e3cd4a7d..04067b249bf3 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -99,6 +99,7 @@ #include <net/route.h> #include <net/ip_fib.h> #include <net/inet_connection_sock.h> +#include <net/gro.h> #include <net/tcp.h> #include <net/udp.h> #include <net/udplite.h> @@ -224,7 +225,7 @@ int inet_listen(struct socket *sock, int backlog) tcp_fastopen_init_key_once(sock_net(sk)); } - err = inet_csk_listen_start(sk, backlog); + err = inet_csk_listen_start(sk); if (err) goto out; tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL); @@ -488,11 +489,8 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, * is temporarily down) */ err = -EADDRNOTAVAIL; - if (!inet_can_nonlocal_bind(net, inet) && - addr->sin_addr.s_addr != htonl(INADDR_ANY) && - chk_addr_ret != RTN_LOCAL && - chk_addr_ret != RTN_MULTICAST && - chk_addr_ret != RTN_BROADCAST) + if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr, + chk_addr_ret)) goto out; snum = ntohs(addr->sin_port); @@ -1454,19 +1452,18 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) proto = iph->protocol; - rcu_read_lock(); ops = rcu_dereference(inet_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) - goto out_unlock; + goto out; if (*(u8 *)iph != 0x45) - goto out_unlock; + goto out; if (ip_is_fragment(iph)) - goto out_unlock; + goto out; if (unlikely(ip_fast_csum((u8 *)iph, 5))) - goto out_unlock; + goto out; id = ntohl(*(__be32 *)&iph->id); flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF)); @@ -1543,9 +1540,6 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff 
*skb) pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive, ops->callbacks.gro_receive, head, skb); -out_unlock: - rcu_read_unlock(); - out: skb_gro_flush_final(skb, pp, flush); @@ -1618,10 +1612,9 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff) csum_replace2(&iph->check, iph->tot_len, newlen); iph->tot_len = newlen; - rcu_read_lock(); ops = rcu_dereference(inet_offloads[proto]); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) - goto out_unlock; + goto out; /* Only need to add sizeof(*iph) to get to the next hdr below * because any hdr with option will have been flushed in @@ -1631,9 +1624,7 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff) tcp4_gro_complete, udp4_gro_complete, skb, nhoff + sizeof(*iph)); -out_unlock: - rcu_read_unlock(); - +out: return err; } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 857a144b1ea9..4db0325f6e1a 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1299,21 +1299,6 @@ static struct packet_type arp_packet_type __read_mostly = { .func = arp_rcv, }; -static int arp_proc_init(void); - -void __init arp_init(void) -{ - neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl); - - dev_add_pack(&arp_packet_type); - arp_proc_init(); -#ifdef CONFIG_SYSCTL - neigh_sysctl_register(NULL, &arp_tbl.parms, NULL); -#endif - register_netdevice_notifier(&arp_netdev_notifier); -} - -#ifdef CONFIG_PROC_FS #if IS_ENABLED(CONFIG_AX25) /* ------------------------------------------------------------------------ */ @@ -1451,16 +1436,14 @@ static struct pernet_operations arp_net_ops = { .exit = arp_net_exit, }; -static int __init arp_proc_init(void) +void __init arp_init(void) { - return register_pernet_subsys(&arp_net_ops); -} - -#else /* CONFIG_PROC_FS */ + neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl); -static int __init arp_proc_init(void) -{ - return 0; + dev_add_pack(&arp_packet_type); + register_pernet_subsys(&arp_net_ops); +#ifdef CONFIG_SYSCTL + neigh_sysctl_register(NULL, &arp_tbl.parms, NULL); +#endif + register_netdevice_notifier(&arp_netdev_notifier); } - -#endif /* CONFIG_PROC_FS */ diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 8e4e9aa12130..d87f02a6e934 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -16,6 +16,7 @@ #include <crypto/authenc.h> #include <linux/err.h> #include <linux/module.h> +#include <net/gro.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/esp.h> diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 8fcbc6258ec5..0d085cc8d96c 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <net/genetlink.h> +#include <net/gro.h> #include <net/gue.h> #include <net/fou.h> #include <net/ip.h> @@ -246,17 +247,14 @@ static struct sk_buff *fou_gro_receive(struct sock *sk, /* Flag this frame as already having an outer encap header */ NAPI_GRO_CB(skb)->is_fou = 1; - rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); if (!ops || !ops->callbacks.gro_receive) - goto out_unlock; + goto out; pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); -out_unlock: - rcu_read_unlock(); - +out: return pp; } @@ -268,19 +266,16 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb, const struct net_offload *ops; int err = -ENOSYS; - rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? 
inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) - goto out_unlock; + goto out; err = ops->callbacks.gro_complete(skb, nhoff); skb_set_inner_mac_header(skb, nhoff); -out_unlock: - rcu_read_unlock(); - +out: return err; } @@ -438,17 +433,14 @@ next_proto: /* Flag this frame as already having an outer encap header */ NAPI_GRO_CB(skb)->is_fou = 1; - rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive)) - goto out_unlock; + goto out; pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); flush = 0; -out_unlock: - rcu_read_unlock(); out: skb_gro_flush_final_remcsum(skb, pp, flush, &grc); @@ -485,18 +477,16 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) return err; } - rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) - goto out_unlock; + goto out; err = ops->callbacks.gro_complete(skb, nhoff + guehlen); skb_set_inner_mac_header(skb, nhoff + guehlen); -out_unlock: - rcu_read_unlock(); +out: return err; } diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 1121a9d5fed9..07073fa35205 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -10,6 +10,7 @@ #include <linux/init.h> #include <net/protocol.h> #include <net/gre.h> +#include <net/gro.h> static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) @@ -162,10 +163,9 @@ static struct sk_buff *gre_gro_receive(struct list_head *head, type = greh->protocol; - rcu_read_lock(); ptype = gro_find_receive_by_type(type); if (!ptype) - goto out_unlock; + goto out; grehlen = GRE_HEADER_SECTION; @@ -179,13 +179,13 @@ static struct sk_buff *gre_gro_receive(struct list_head *head, if (skb_gro_header_hard(skb, hlen)) { greh = skb_gro_header_slow(skb, hlen, off); if (unlikely(!greh)) - goto out_unlock; + goto out; } /* Don't bother verifying checksum if we're going to flush anyway. 
*/ if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) { if (skb_gro_checksum_simple_validate(skb)) - goto out_unlock; + goto out; skb_gro_checksum_try_convert(skb, IPPROTO_GRE, null_compute_pseudo); @@ -229,8 +229,6 @@ static struct sk_buff *gre_gro_receive(struct list_head *head, pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); flush = 0; -out_unlock: - rcu_read_unlock(); out: skb_gro_flush_final(skb, pp, flush); @@ -255,13 +253,10 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff) if (greh->flags & GRE_CSUM) grehlen += GRE_HEADER_SECTION; - rcu_read_lock(); ptype = gro_find_complete_by_type(type); if (ptype) err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); - rcu_read_unlock(); - skb_set_inner_mac_header(skb, nhoff + grehlen); return err; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index d2e2b3d18c66..2ad3c7b42d6d 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2558,7 +2558,6 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, msf->imsf_fmode = pmc->sfmode; psl = rtnl_dereference(pmc->sflist); if (!psl) { - len = 0; count = 0; } else { count = psl->sl_count; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index f7fea3a7c5e6..23da67a3fc06 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1035,7 +1035,7 @@ void inet_csk_prepare_forced_close(struct sock *sk) } EXPORT_SYMBOL(inet_csk_prepare_forced_close); -int inet_csk_listen_start(struct sock *sk, int backlog) +int inet_csk_listen_start(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet = inet_sk(sk); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 75737267746f..30ab717ff1b8 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -307,7 +307,7 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, __be32 saddr, __be16 sport, - __be32 daddr, u16 hnum) + __be32 daddr, u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -315,8 +315,8 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net, if (hashinfo != &tcp_hashinfo) return NULL; /* only TCP is supported */ - no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -340,7 +340,7 @@ struct sock *__inet_lookup_listener(struct net *net, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { result = inet_lookup_run_bpf(net, hashinfo, skb, doff, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (result) goto done; } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 9bca57ef8b83..57c1d8431386 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -672,7 +672,6 @@ struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state) struct sk_buff *skb2; struct iphdr *iph; - len = state->left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > state->mtu) len = state->mtu; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 38d29b175ca6..445a9ecaefa1 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -576,7 +576,7 @@ out: return err; } -static void __ip_sock_set_tos(struct sock *sk, int val) +void __ip_sock_set_tos(struct sock *sk, int val) { if (sk->sk_type == 
SOCK_STREAM) { val &= ~INET_ECN_MASK; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 1e44a43acfe2..e540b0dcf085 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -311,15 +311,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); - if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) - chk_addr_ret = RTN_LOCAL; - else - chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); - - if ((!inet_can_nonlocal_bind(net, isk) && - chk_addr_ret != RTN_LOCAL) || - chk_addr_ret == RTN_MULTICAST || - chk_addr_ret == RTN_BROADCAST) + chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); + + if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk), + addr->sin_addr.s_addr, + chk_addr_ret)) return -EADDRNOTAVAIL; #if IS_ENABLED(CONFIG_IPV6) diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index bb446e60cf58..a53f256bf9d3 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -99,8 +99,8 @@ int raw_hash_sk(struct sock *sk) write_lock_bh(&h->lock); sk_add_node(sk, head); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&h->lock); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return 0; } @@ -717,6 +717,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; + struct net *net = sock_net(sk); u32 tb_id = RT_TABLE_LOCAL; int ret = -EINVAL; int chk_addr_ret; @@ -725,16 +726,16 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) goto out; if (sk->sk_bound_dev_if) - tb_id = l3mdev_fib_table_by_index(sock_net(sk), - sk->sk_bound_dev_if) ? : tb_id; + tb_id = l3mdev_fib_table_by_index(net, + sk->sk_bound_dev_if) ? 
: tb_id; - chk_addr_ret = inet_addr_type_table(sock_net(sk), addr->sin_addr.s_addr, - tb_id); + chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id); ret = -EADDRNOTAVAIL; - if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && - chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) + if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr, + chk_addr_ret)) goto out; + inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 0b4103b1e622..243a0c52be42 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -602,7 +602,7 @@ static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash) static u32 fnhe_hashfun(__be32 daddr) { - static siphash_key_t fnhe_hash_key __read_mostly; + static siphash_aligned_key_t fnhe_hash_key; u64 hval; net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key)); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 8696dc343ad2..2cb3b852d148 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -14,7 +14,7 @@ #include <net/tcp.h> #include <net/route.h> -static siphash_key_t syncookie_secret[2] __read_mostly; +static siphash_aligned_key_t syncookie_secret[2]; #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bbb3d39c69af..6ab82e1a1d41 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -292,7 +292,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count); long sysctl_tcp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_tcp_mem); -atomic_long_t tcp_memory_allocated; /* Current allocated memory. */ +atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp; /* Current allocated memory. */ EXPORT_SYMBOL(tcp_memory_allocated); #if IS_ENABLED(CONFIG_SMC) @@ -303,7 +303,7 @@ EXPORT_SYMBOL(tcp_have_smc); /* * Current number of TCP sockets. 
*/ -struct percpu_counter tcp_sockets_allocated; +struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp; EXPORT_SYMBOL(tcp_sockets_allocated); /* @@ -456,7 +456,6 @@ void tcp_init_sock(struct sock *sk) WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]); sk_sockets_allocated_inc(sk); - sk->sk_route_forced_caps = NETIF_F_GSO; } EXPORT_SYMBOL(tcp_init_sock); @@ -546,10 +545,11 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) if (state != TCP_SYN_SENT && (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) { int target = sock_rcvlowat(sk, 0, INT_MAX); + u16 urg_data = READ_ONCE(tp->urg_data); - if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) && - !sock_flag(sk, SOCK_URGINLINE) && - tp->urg_data) + if (unlikely(urg_data) && + READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) && + !sock_flag(sk, SOCK_URGINLINE)) target++; if (tcp_stream_is_readable(sk, target)) @@ -574,7 +574,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) } else mask |= EPOLLOUT | EPOLLWRNORM; - if (tp->urg_data & TCP_URG_VALID) + if (urg_data & TCP_URG_VALID) mask |= EPOLLPRI; } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { /* Active TCP fastopen socket with defer_connect @@ -608,7 +608,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) unlock_sock_fast(sk, slow); break; case SIOCATMARK: - answ = tp->urg_data && + answ = READ_ONCE(tp->urg_data) && READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq); break; case SIOCOUTQ: @@ -1466,7 +1466,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) char c = tp->urg_data; if (!(flags & MSG_PEEK)) - tp->urg_data = TCP_URG_READ; + WRITE_ONCE(tp->urg_data, TCP_URG_READ); /* Read urgent data. 
*/ msg->msg_flags |= MSG_OOB; @@ -1580,6 +1580,36 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied) tcp_send_ack(sk); } +void __sk_defer_free_flush(struct sock *sk) +{ + struct llist_node *head; + struct sk_buff *skb, *n; + + head = llist_del_all(&sk->defer_list); + llist_for_each_entry_safe(skb, n, head, ll_node) { + prefetch(n); + skb_mark_not_on_list(skb); + __kfree_skb(skb); + } +} +EXPORT_SYMBOL(__sk_defer_free_flush); + +static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) +{ + __skb_unlink(skb, &sk->sk_receive_queue); + if (likely(skb->destructor == sock_rfree)) { + sock_rfree(skb); + skb->destructor = NULL; + skb->sk = NULL; + if (!skb_queue_empty(&sk->sk_receive_queue) || + !llist_empty(&sk->defer_list)) { + llist_add(&skb->ll_node, &sk->defer_list); + return; + } + } + __kfree_skb(skb); +} + static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) { struct sk_buff *skb; @@ -1599,7 +1629,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) * splitted a fat GRO packet, while we released socket lock * in skb_splice_bits() */ - sk_eat_skb(sk, skb); + tcp_eat_recv_skb(sk, skb); } return NULL; } @@ -1633,7 +1663,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, len = skb->len - offset; /* Stop reading if we hit a patch of urgent data */ - if (tp->urg_data) { + if (unlikely(tp->urg_data)) { u32 urg_offset = tp->urg_seq - seq; if (urg_offset < len) len = urg_offset; @@ -1665,11 +1695,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, continue; } if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { - sk_eat_skb(sk, skb); + tcp_eat_recv_skb(sk, skb); ++seq; break; } - sk_eat_skb(sk, skb); + tcp_eat_recv_skb(sk, skb); if (!desc->count) break; WRITE_ONCE(tp->copied_seq, seq); @@ -2329,7 +2359,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, u32 offset; /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ - if (tp->urg_data && tp->urg_seq == *seq) { + if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { if (copied) break; if (signal_pending(current)) { @@ -2372,10 +2402,10 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, break; if (copied) { - if (sk->sk_err || + if (!timeo || + sk->sk_err || sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN) || - !timeo || signal_pending(current)) break; } else { @@ -2409,13 +2439,12 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, } } - tcp_cleanup_rbuf(sk, copied); - if (copied >= target) { /* Do not sleep, just process backlog. */ - release_sock(sk); - lock_sock(sk); + __sk_flush_backlog(sk); } else { + tcp_cleanup_rbuf(sk, copied); + sk_defer_free_flush(sk); sk_wait_data(sk, &timeo, last); } @@ -2435,7 +2464,7 @@ found_ok_skb: used = len; /* Do we have urgent data here? */ - if (tp->urg_data) { + if (unlikely(tp->urg_data)) { u32 urg_offset = tp->urg_seq - *seq; if (urg_offset < used) { if (!urg_offset) { @@ -2469,8 +2498,8 @@ found_ok_skb: tcp_rcv_space_adjust(sk); skip_copy: - if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { - tp->urg_data = 0; + if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { + WRITE_ONCE(tp->urg_data, 0); tcp_fast_path_check(sk); } @@ -2485,14 +2514,14 @@ skip_copy: if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; if (!(flags & MSG_PEEK)) - sk_eat_skb(sk, skb); + tcp_eat_recv_skb(sk, skb); continue; found_fin_ok: /* Process the FIN. 
*/ WRITE_ONCE(*seq, *seq + 1); if (!(flags & MSG_PEEK)) - sk_eat_skb(sk, skb); + tcp_eat_recv_skb(sk, skb); break; } while (len > 0); @@ -2534,6 +2563,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, ret = tcp_recvmsg_locked(sk, msg, len, nonblock, flags, &tss, &cmsg_flags); release_sock(sk); + sk_defer_free_flush(sk); if (cmsg_flags && ret >= 0) { if (cmsg_flags & TCP_CMSG_TS) @@ -2964,7 +2994,7 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); - tp->urg_data = 0; + WRITE_ONCE(tp->urg_data, 0); tcp_write_queue_purge(sk); tcp_fastopen_active_disable_ofo_check(sk); skb_rbtree_purge(&tp->out_of_order_queue); @@ -3059,7 +3089,7 @@ int tcp_disconnect(struct sock *sk, int flags) sk->sk_frag.page = NULL; sk->sk_frag.offset = 0; } - + sk_defer_free_flush(sk); sk_error_report(sk); return 0; } @@ -3774,10 +3804,12 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) tcp_get_info_chrono_stats(tp, info); info->tcpi_segs_out = tp->segs_out; - info->tcpi_segs_in = tp->segs_in; + + /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ + info->tcpi_segs_in = READ_ONCE(tp->segs_in); + info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); info->tcpi_min_rtt = tcp_min_rtt(tp); - info->tcpi_data_segs_in = tp->data_segs_in; info->tcpi_data_segs_out = tp->data_segs_out; info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0; @@ -4186,6 +4218,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, &zc, &len, err); release_sock(sk); + sk_defer_free_flush(sk); if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) goto zerocopy_rcv_cmsg; switch (len) { diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 246ab7b5e857..3658b9c3dd2b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5591,7 +5591,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) } } - tp->urg_data = TCP_URG_NOTYET; + WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET); WRITE_ONCE(tp->urg_seq, ptr); /* Disable header prediction. */ @@ -5604,11 +5604,11 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t struct tcp_sock *tp = tcp_sk(sk); /* Check if we get a new urgent pointer - normally not. */ - if (th->urg) + if (unlikely(th->urg)) tcp_check_urg(sk, th); /* Do we wait for any urgent data? - normally not... 
*/ - if (tp->urg_data == TCP_URG_NOTYET) { + if (unlikely(tp->urg_data == TCP_URG_NOTYET)) { u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - th->syn; @@ -5617,7 +5617,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t u8 tmp; if (skb_copy_bits(skb, ptr, &tmp, 1)) BUG(); - tp->urg_data = TCP_URG_VALID | tmp; + WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 13d868c43284..3dd19a2bf06c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1182,7 +1182,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, if (!md5sig) return -ENOMEM; - sk_nocaps_add(sk, NETIF_F_GSO_MASK); + sk_gso_disable(sk); INIT_HLIST_HEAD(&md5sig->head); rcu_assign_pointer(tp->md5sig_info, md5sig); } @@ -1620,7 +1620,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, */ tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags, key->key, key->keylen, GFP_ATOMIC); - sk_nocaps_add(newsk, NETIF_F_GSO_MASK); + sk_gso_disable(newsk); } #endif @@ -1800,8 +1800,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) { - u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf); - u32 tail_gso_size, tail_gso_segs; + u32 limit, tail_gso_size, tail_gso_segs; struct skb_shared_info *shinfo; const struct tcphdr *th; struct tcphdr *thtail; @@ -1909,7 +1908,7 @@ no_coalesce: * to reduce memory overhead, so add a little headroom here. * Few sockets backlog are possibly concurrently non empty. */ - limit += 64*1024; + limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024; if (unlikely(sk_add_backlog(sk, skb, limit))) { bh_unlock_sock(sk); @@ -2103,6 +2102,7 @@ process: sk_incoming_cpu_update(sk); + sk_defer_free_flush(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index fc61cd3fea65..30abde86db45 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -8,6 +8,7 @@ #include <linux/indirect_call_wrapper.h> #include <linux/skbuff.h> +#include <net/gro.h> #include <net/tcp.h> #include <net/protocol.h> diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2e6e5a70168e..5079832af5c1 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1359,7 +1359,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, #ifdef CONFIG_TCP_MD5SIG /* Calculate the MD5 hash, as we have all we need now */ if (md5) { - sk_nocaps_add(sk, NETIF_F_GSO_MASK); + sk_gso_disable(sk); tp->af_specific->calc_md5_hash(opts.hash_location, md5, sk, skb); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 8bcecdd6aeda..799b663daf87 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -122,7 +122,7 @@ EXPORT_SYMBOL(udp_table); long sysctl_udp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_udp_mem); -atomic_long_t udp_memory_allocated; +atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp; EXPORT_SYMBOL(udp_memory_allocated); #define MAX_UDP_PORTS 65536 @@ -459,7 +459,7 @@ static struct sock *udp4_lookup_run_bpf(struct net *net, struct udp_table *udptable, struct sk_buff *skb, __be32 saddr, __be16 sport, - __be32 daddr, u16 hnum) + __be32 daddr, u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -467,8 +467,8 @@ static struct sock *udp4_lookup_run_bpf(struct net *net, if (udptable != &udp_table) return NULL; /* only UDP is 
supported */ - no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -504,7 +504,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { sk = udp4_lookup_run_bpf(net, udptable, skb, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (sk) { result = sk; goto done; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 86d32a1e62ac..6d1a4bec2614 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -7,6 +7,7 @@ */ #include <linux/skbuff.h> +#include <net/gro.h> #include <net/udp.h> #include <net/protocol.h> #include <net/inet_common.h> @@ -424,6 +425,33 @@ out: return segs; } +static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) +{ + if (unlikely(p->len + skb->len >= 65536)) + return -E2BIG; + + if (NAPI_GRO_CB(p)->last == p) + skb_shinfo(p)->frag_list = skb; + else + NAPI_GRO_CB(p)->last->next = skb; + + skb_pull(skb, skb_gro_offset(skb)); + + NAPI_GRO_CB(p)->last = skb; + NAPI_GRO_CB(p)->count++; + p->data_len += skb->len; + + /* sk ownership - if any - completely transferred to the aggregated packet */ + skb->destructor = NULL; + p->truesize += skb->truesize; + p->len += skb->len; + + NAPI_GRO_CB(skb)->same_flow = 1; + + return 0; +} + + #define UDP_GRO_CNT_MAX 64 static struct sk_buff *udp_gro_receive_segment(struct list_head *head, struct sk_buff *skb) @@ -600,13 +628,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) inet_gro_compute_pseudo); skip: NAPI_GRO_CB(skb)->is_ipv6 = 0; - rcu_read_lock(); if (static_branch_unlikely(&udp_encap_needed_key)) sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest); pp = udp_gro_receive(head, skb, uh, sk); - rcu_read_unlock(); return pp; flush: @@ -641,7 +667,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff, uh->len = newlen; - rcu_read_lock(); sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb, udp4_lib_lookup_skb, skb, uh->source, uh->dest); if (sk && udp_sk(sk)->gro_complete) { @@ -662,7 +687,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff, } else { err = udp_gro_complete_segment(skb); } - rcu_read_unlock(); if (skb->remcsum_offload) skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index dab4a047590b..d1636425654e 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -337,11 +337,8 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr); rcu_read_unlock(); - if (!inet_can_nonlocal_bind(net, inet) && - v4addr != htonl(INADDR_ANY) && - chk_addr_ret != RTN_LOCAL && - chk_addr_ret != RTN_MULTICAST && - chk_addr_ret != RTN_BROADCAST) { + if (!inet_addr_valid_or_nonlocal(net, inet, v4addr, + chk_addr_ret)) { err = -EADDRNOTAVAIL; goto out; } diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 828e62514260..b5995c1f4d7a 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -175,7 +175,6 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des * See 11.3.2 of RFC 3775 for details.
*/ if (opt[off] == IPV6_TLV_HAO) { - struct in6_addr final_addr; struct ipv6_destopt_hao *hao; hao = (struct ipv6_destopt_hao *)&opt[off]; @@ -184,9 +183,7 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des hao->length); goto bad; } - final_addr = hao->addr; - hao->addr = iph->saddr; - iph->saddr = final_addr; + swap(hao->addr, iph->saddr); } break; } diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index a349d4798077..ba5e81cd569c 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -16,6 +16,7 @@ #include <crypto/authenc.h> #include <linux/err.h> #include <linux/module.h> +#include <net/gro.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/esp.h> diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 38ece3b7b839..77e34aec7e82 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -686,7 +686,6 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) struct net *net = dev_net(skb->dev); int accept_source_route = net->ipv6.devconf_all->accept_source_route; - idev = __in6_dev_get(skb->dev); if (idev && accept_source_route > idev->cnf.accept_source_route) accept_source_route = idev->cnf.accept_source_route; diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 67c9114835c8..4514444e96c8 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -165,7 +165,7 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, - const u16 hnum) + const u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -173,8 +173,8 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net, if (hashinfo != &tcp_hashinfo) return NULL; /* only TCP is supported */ - no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -198,7 +198,7 @@ struct sock *inet6_lookup_listener(struct net *net, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { result = inet6_lookup_run_bpf(net, hashinfo, skb, doff, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (result) goto done; } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 1b9827ff8ccf..48674888f2dc 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -208,7 +208,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, flush += ntohs(iph->payload_len) != skb_gro_len(skb); - rcu_read_lock(); proto = iph->nexthdr; ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) { @@ -221,7 +220,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) - goto out_unlock; + goto out; iph = ipv6_hdr(skb); } @@ -279,9 +278,6 @@ not_same_flow: pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive, ops->callbacks.gro_receive, head, skb); -out_unlock: - rcu_read_unlock(); - out: skb_gro_flush_final(skb, pp, flush); @@ -331,18 +327,14 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff) iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); - rcu_read_lock(); - nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) - goto out_unlock; + 
goto out; err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete, udp6_gro_complete, skb, nhoff); -out_unlock: - rcu_read_unlock(); - +out: return err; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index ff4e83e2a506..2995f8d89e7e 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -977,7 +977,7 @@ slow_path: fail_toobig: if (skb->sk && dst_allfrag(skb_dst(skb))) - sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); + sk_gso_disable(skb->sk); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); err = -EMSGSIZE; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 41efca817db4..a733803a710c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -471,10 +471,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, if (sk->sk_protocol == IPPROTO_TCP) { struct inet_connection_sock *icsk = inet_csk(sk); - local_bh_disable(); + sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, &tcp_prot, 1); - local_bh_enable(); + sk->sk_prot = &tcp_prot; icsk->icsk_af_ops = &ipv4_specific; sk->sk_socket->ops = &inet_stream_ops; @@ -485,10 +485,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, if (sk->sk_protocol == IPPROTO_UDPLITE) prot = &udplite_prot; - local_bh_disable(); + sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, prot, 1); - local_bh_enable(); + sk->sk_prot = prot; sk->sk_socket->ops = &inet_dgram_ops; sk->sk_family = PF_INET; @@ -599,7 +599,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, /* RFC 3542, 6.5: default traffic class of 0x0 */ if (val == -1) val = 0; - np->tclass = val; + if (sk->sk_type == SOCK_STREAM) { + val &= ~INET_ECN_MASK; + val |= np->tclass & INET_ECN_MASK; + } + if (np->tclass != val) { + np->tclass = val; + sk_dst_reset(sk); + } retv = 0; break; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 42d60c76d30a..62f1e16eea2b 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -328,9 +328,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = { static void rt6_info_init(struct rt6_info *rt) { - struct dst_entry *dst = &rt->dst; - - memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); + memset_after(rt, 0, dst); INIT_LIST_HEAD(&rt->rt6i_uncached); } @@ -1485,7 +1483,7 @@ static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket) static u32 rt6_exception_hash(const struct in6_addr *dst, const struct in6_addr *src) { - static siphash_key_t rt6_exception_key __read_mostly; + static siphash_aligned_key_t rt6_exception_key; struct { struct in6_addr dst; struct in6_addr src; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index e8cfb9e997bf..d1b61d00368e 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -20,7 +20,7 @@ #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) -static siphash_key_t syncookie6_secret[2] __read_mostly; +static siphash_aligned_key_t syncookie6_secret[2]; /* RFC 2460, Section 8.3: * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..] 
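[Editor's note, not part of the diff] The hunks above in net/ipv4/route.c, net/ipv4/syncookies.c, net/ipv6/route.c and net/ipv6/syncookies.c replace "static siphash_key_t key __read_mostly;" with "static siphash_aligned_key_t key;". Assuming that helper type is simply a siphash key whose storage is forced to 16-byte alignment (so the hash routines can read both 64-bit key words with naturally aligned loads), the following small userspace C sketch illustrates the same idea; the struct name, variable name and attribute usage here are illustrative stand-ins, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's siphash_key_t: two 64-bit key words */
struct sip_key {
	uint64_t key[2];
};

/* hypothetical equivalent of siphash_aligned_key_t: the same key type,
 * but the object itself is forced to 16-byte alignment
 */
static struct sip_key cookie_secret __attribute__((aligned(16)));

int main(void)
{
	/* the forced alignment guarantees the low four address bits are zero */
	printf("16-byte aligned: %d\n",
	       (int)(((uintptr_t)&cookie_secret % 16) == 0));
	return 0;
}

The sketch only shows the alignment aspect of the change; the kernel's real definition presumably lives alongside siphash_key_t in include/linux/siphash.h.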
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 551fce49841d..3b7d6ede1364 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -72,7 +72,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb); static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *req); -static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); +INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); static const struct inet_connection_sock_af_ops ipv6_mapped; const struct inet_connection_sock_af_ops ipv6_specific; @@ -1466,7 +1466,8 @@ INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, * This is because we cannot sleep with the original spinlock * held. */ -static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) +INDIRECT_CALLABLE_SCOPE +int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct sk_buff *opt_skb = NULL; @@ -1757,6 +1758,7 @@ process: sk_incoming_cpu_update(sk); + sk_defer_free_flush(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; @@ -1893,9 +1895,7 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) { - struct ipv6_pinfo *np = inet6_sk(sk); - - __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); + __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr); } const struct inet_connection_sock_af_ops ipv6_specific = { diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index 1796856bc24f..39db5a226855 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -7,6 +7,7 @@ */ #include <linux/indirect_call_wrapper.h> #include <linux/skbuff.h> +#include <net/gro.h> #include <net/protocol.h> #include <net/tcp.h> #include <net/ip6_checksum.h> diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index e43b31d25fb6..6a0e569f0bb8 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -195,7 +195,7 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, - u16 hnum) + u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -203,8 +203,8 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net, if (udptable != &udp_table) return NULL; /* only UDP is supported */ - no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -240,7 +240,7 @@ struct sock *__udp6_lib_lookup(struct net *net, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { sk = udp6_lookup_run_bpf(net, udptable, skb, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (sk) { result = sk; goto done; diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index b3d9ed96e5ea..7720d04ed396 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -13,6 +13,7 @@ #include <net/udp.h> #include <net/ip6_checksum.h> #include "ip6_offload.h" +#include <net/gro.h> static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, netdev_features_t features) @@ -144,13 +145,11 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) skip: NAPI_GRO_CB(skb)->is_ipv6 = 1; - rcu_read_lock(); if (static_branch_unlikely(&udpv6_encap_needed_key)) sk = 
udp6_gro_lookup_skb(skb, uh->source, uh->dest); pp = udp_gro_receive(head, skb, uh, sk); - rcu_read_unlock(); return pp; flush: diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 18316ee3c692..49ecbe8d176a 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -142,7 +142,7 @@ static inline size_t iucv_msg_length(struct iucv_message *msg) * iucv_sock_in_state() - check for specific states * @sk: sock structure * @state: first iucv sk state - * @state: second iucv sk state + * @state2: second iucv sk state * * Returns true if the socket in either in the first or second state. */ @@ -172,7 +172,7 @@ static inline int iucv_below_msglim(struct sock *sk) (atomic_read(&iucv->pendings) <= 0)); } -/** +/* * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit */ static void iucv_sock_wake_msglim(struct sock *sk) @@ -187,7 +187,7 @@ static void iucv_sock_wake_msglim(struct sock *sk) rcu_read_unlock(); } -/** +/* * afiucv_hs_send() - send a message through HiperSockets transport */ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, @@ -473,7 +473,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, atomic_set(&iucv->msg_recv, 0); iucv->path = NULL; iucv->sk_txnotify = afiucv_hs_callback_txnotify; - memset(&iucv->src_user_id , 0, 32); + memset(&iucv->init, 0, sizeof(iucv->init)); if (pr_iucv) iucv->transport = AF_IUCV_TRANS_IUCV; else @@ -1831,9 +1831,9 @@ static void afiucv_swap_src_dest(struct sk_buff *skb) memset(skb->data, 0, ETH_HLEN); } -/** +/* * afiucv_hs_callback_syn - react on received SYN - **/ + */ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) { struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb); @@ -1896,9 +1896,9 @@ out: return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_callback_synack() - react on received SYN-ACK - **/ + */ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); @@ -1917,9 +1917,9 @@ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_callback_synfin() - react on received SYN_FIN - **/ + */ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); @@ -1937,9 +1937,9 @@ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_callback_fin() - react on received FIN - **/ + */ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); @@ -1960,9 +1960,9 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_callback_win() - react on received WIN - **/ + */ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); @@ -1978,9 +1978,9 @@ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_callback_rx() - react on received data - **/ + */ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); @@ -2022,11 +2022,11 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) return NET_RX_SUCCESS; } -/** +/* * afiucv_hs_rcv() - base function for arriving data through HiperSockets * transport * called from netif RX softirq - **/ + */ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, 
struct net_device *orig_dev) { @@ -2128,10 +2128,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, return err; } -/** +/* * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets * transport - **/ + */ static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n) { struct iucv_sock *iucv = iucv_sk(sk); diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index f3343a8541a5..8f4d49a7d3e8 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -276,8 +276,8 @@ static union iucv_param *iucv_param[NR_CPUS]; static union iucv_param *iucv_param_irq[NR_CPUS]; /** - * iucv_call_b2f0 - * @code: identifier of IUCV call to CP. + * __iucv_call_b2f0 + * @command: identifier of IUCV call to CP. * @parm: pointer to a struct iucv_parm block * * Calls CP to execute IUCV commands. @@ -309,7 +309,7 @@ static inline int iucv_call_b2f0(int command, union iucv_param *parm) return ccode == 1 ? parm->ctrl.iprcode : ccode; } -/** +/* * iucv_query_maxconn * * Determines the maximum number of connections that may be established. @@ -493,8 +493,8 @@ static void iucv_retrieve_cpu(void *data) cpumask_clear_cpu(cpu, &iucv_buffer_cpumask); } -/** - * iucv_setmask_smp +/* + * iucv_setmask_mp * * Allow iucv interrupts on all cpus. */ @@ -512,7 +512,7 @@ static void iucv_setmask_mp(void) cpus_read_unlock(); } -/** +/* * iucv_setmask_up * * Allow iucv interrupts on a single cpu. @@ -529,7 +529,7 @@ static void iucv_setmask_up(void) smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); } -/** +/* * iucv_enable * * This function makes iucv ready for use. It allocates the pathid @@ -564,7 +564,7 @@ out: return rc; } -/** +/* * iucv_disable * * This function shuts down iucv. It disables iucv interrupts, retrieves @@ -1347,8 +1347,9 @@ EXPORT_SYMBOL(iucv_message_send); * @srccls: source class of message * @buffer: address of send buffer or address of struct iucv_array * @size: length of send buffer - * @ansbuf: address of answer buffer or address of struct iucv_array + * @answer: address of answer buffer or address of struct iucv_array * @asize: size of reply buffer + * @residual: ignored * * This function transmits data to another application. Data to be * transmitted is in a buffer. The receiver of the send is expected to @@ -1400,13 +1401,6 @@ out: } EXPORT_SYMBOL(iucv_message_send2way); -/** - * iucv_path_pending - * @data: Pointer to external interrupt buffer - * - * Process connection pending work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_path_pending { u16 ippathid; u8 ipflags1; @@ -1420,6 +1414,13 @@ struct iucv_path_pending { u8 res4[3]; } __packed; +/** + * iucv_path_pending + * @data: Pointer to external interrupt buffer + * + * Process connection pending work item. Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_path_pending(struct iucv_irq_data *data) { struct iucv_path_pending *ipp = (void *) data; @@ -1461,13 +1462,6 @@ out_sever: iucv_sever_pathid(ipp->ippathid, error); } -/** - * iucv_path_complete - * @data: Pointer to external interrupt buffer - * - * Process connection complete work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_path_complete { u16 ippathid; u8 ipflags1; @@ -1481,6 +1475,13 @@ struct iucv_path_complete { u8 res4[3]; } __packed; +/** + * iucv_path_complete + * @data: Pointer to external interrupt buffer + * + * Process connection complete work item. Called from tasklet while holding + * iucv_table_lock. 
+ */ static void iucv_path_complete(struct iucv_irq_data *data) { struct iucv_path_complete *ipc = (void *) data; @@ -1492,13 +1493,6 @@ static void iucv_path_complete(struct iucv_irq_data *data) path->handler->path_complete(path, ipc->ipuser); } -/** - * iucv_path_severed - * @data: Pointer to external interrupt buffer - * - * Process connection severed work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_path_severed { u16 ippathid; u8 res1; @@ -1511,6 +1505,13 @@ struct iucv_path_severed { u8 res5[3]; } __packed; +/** + * iucv_path_severed + * @data: Pointer to external interrupt buffer + * + * Process connection severed work item. Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_path_severed(struct iucv_irq_data *data) { struct iucv_path_severed *ips = (void *) data; @@ -1528,13 +1529,6 @@ static void iucv_path_severed(struct iucv_irq_data *data) } } -/** - * iucv_path_quiesced - * @data: Pointer to external interrupt buffer - * - * Process connection quiesced work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_path_quiesced { u16 ippathid; u8 res1; @@ -1547,6 +1541,13 @@ struct iucv_path_quiesced { u8 res5[3]; } __packed; +/** + * iucv_path_quiesced + * @data: Pointer to external interrupt buffer + * + * Process connection quiesced work item. Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_path_quiesced(struct iucv_irq_data *data) { struct iucv_path_quiesced *ipq = (void *) data; @@ -1556,13 +1557,6 @@ static void iucv_path_quiesced(struct iucv_irq_data *data) path->handler->path_quiesced(path, ipq->ipuser); } -/** - * iucv_path_resumed - * @data: Pointer to external interrupt buffer - * - * Process connection resumed work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_path_resumed { u16 ippathid; u8 res1; @@ -1575,6 +1569,13 @@ struct iucv_path_resumed { u8 res5[3]; } __packed; +/** + * iucv_path_resumed + * @data: Pointer to external interrupt buffer + * + * Process connection resumed work item. Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_path_resumed(struct iucv_irq_data *data) { struct iucv_path_resumed *ipr = (void *) data; @@ -1584,13 +1585,6 @@ static void iucv_path_resumed(struct iucv_irq_data *data) path->handler->path_resumed(path, ipr->ipuser); } -/** - * iucv_message_complete - * @data: Pointer to external interrupt buffer - * - * Process message complete work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_message_complete { u16 ippathid; u8 ipflags1; @@ -1606,6 +1600,13 @@ struct iucv_message_complete { u8 res2[3]; } __packed; +/** + * iucv_message_complete + * @data: Pointer to external interrupt buffer + * + * Process message complete work item. Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_message_complete(struct iucv_irq_data *data) { struct iucv_message_complete *imc = (void *) data; @@ -1624,13 +1625,6 @@ static void iucv_message_complete(struct iucv_irq_data *data) } } -/** - * iucv_message_pending - * @data: Pointer to external interrupt buffer - * - * Process message pending work item. Called from tasklet while holding - * iucv_table_lock. - */ struct iucv_message_pending { u16 ippathid; u8 ipflags1; @@ -1653,6 +1647,13 @@ struct iucv_message_pending { u8 res2[3]; } __packed; +/** + * iucv_message_pending + * @data: Pointer to external interrupt buffer + * + * Process message pending work item. 
Called from tasklet while holding + * iucv_table_lock. + */ static void iucv_message_pending(struct iucv_irq_data *data) { struct iucv_message_pending *imp = (void *) data; @@ -1673,7 +1674,7 @@ static void iucv_message_pending(struct iucv_irq_data *data) } } -/** +/* * iucv_tasklet_fn: * * This tasklet loops over the queue of irq buffers created by @@ -1717,7 +1718,7 @@ static void iucv_tasklet_fn(unsigned long ignored) spin_unlock(&iucv_table_lock); } -/** +/* * iucv_work_fn: * * This work function loops over the queue of path pending irq blocks @@ -1748,9 +1749,8 @@ static void iucv_work_fn(struct work_struct *work) spin_unlock_bh(&iucv_table_lock); } -/** +/* * iucv_external_interrupt - * @code: irq code * * Handles external interrupts coming in from CP. * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c index 99a2e30b3833..b2253df54413 100644 --- a/net/mac80211/ethtool.c +++ b/net/mac80211/ethtool.c @@ -14,7 +14,9 @@ #include "driver-ops.h" static int ieee80211_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *rp) + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) { struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); @@ -25,7 +27,9 @@ static int ieee80211_set_ringparam(struct net_device *dev, } static void ieee80211_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *rp) + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) { struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c index 36fac3daf86a..86ad15abf897 100644 --- a/net/mctp/test/route-test.c +++ b/net/mctp/test/route-test.c @@ -150,11 +150,6 @@ static void mctp_test_fragment(struct kunit *test) rt = mctp_test_create_route(&init_net, NULL, 10, mtu); KUNIT_ASSERT_TRUE(test, rt); - /* The refcount would usually be incremented as part of a route lookup, - * but we're setting the route directly here. 
- */ - refcount_inc(&rt->rt.refs); - rc = mctp_do_fragment_route(&rt->rt, skb, mtu, MCTP_TAG_OWNER); KUNIT_EXPECT_FALSE(test, rc); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index c82a76d2d0bf..b100048e43fe 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -48,7 +48,7 @@ enum { MPTCP_CMSG_TS = BIT(0), }; -static struct percpu_counter mptcp_sockets_allocated; +static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp; static void __mptcp_destroy_sock(struct sock *sk); static void __mptcp_check_send_data_fin(struct sock *sk); diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index 0f1e661c2032..fb43e145cb57 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -390,6 +390,8 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, switch (optname) { case IPV6_V6ONLY: + case IPV6_TRANSPARENT: + case IPV6_FREEBIND: lock_sock(sk); ssock = __mptcp_nmpc_socket(msk); if (!ssock) { @@ -398,8 +400,24 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, } ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen); - if (ret == 0) + if (ret != 0) { + release_sock(sk); + return ret; + } + + sockopt_seq_inc(msk); + + switch (optname) { + case IPV6_V6ONLY: sk->sk_ipv6only = ssock->sk->sk_ipv6only; + break; + case IPV6_TRANSPARENT: + inet_sk(sk)->transparent = inet_sk(ssock->sk)->transparent; + break; + case IPV6_FREEBIND: + inet_sk(sk)->freebind = inet_sk(ssock->sk)->freebind; + break; + } release_sock(sk); break; @@ -598,6 +616,85 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t return ret; } +static int mptcp_setsockopt_sol_ip_set_transparent(struct mptcp_sock *msk, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = (struct sock *)msk; + struct inet_sock *issk; + struct socket *ssock; + int err; + + err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen); + if (err != 0) + return err; + + lock_sock(sk); + + ssock = __mptcp_nmpc_socket(msk); + if (!ssock) { + release_sock(sk); + return -EINVAL; + } + + issk = inet_sk(ssock->sk); + + switch (optname) { + case IP_FREEBIND: + issk->freebind = inet_sk(sk)->freebind; + break; + case IP_TRANSPARENT: + issk->transparent = inet_sk(sk)->transparent; + break; + default: + release_sock(sk); + WARN_ON_ONCE(1); + return -EOPNOTSUPP; + } + + sockopt_seq_inc(msk); + release_sock(sk); + return 0; +} + +static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = (struct sock *)msk; + int err, val; + + err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen); + + if (err != 0) + return err; + + lock_sock(sk); + sockopt_seq_inc(msk); + val = inet_sk(sk)->tos; + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + __ip_sock_set_tos(ssk, val); + } + release_sock(sk); + + return err; +} + +static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname, + sockptr_t optval, unsigned int optlen) +{ + switch (optname) { + case IP_FREEBIND: + case IP_TRANSPARENT: + return mptcp_setsockopt_sol_ip_set_transparent(msk, optname, optval, optlen); + case IP_TOS: + return mptcp_setsockopt_v4_set_tos(msk, optname, optval, optlen); + } + + return -EOPNOTSUPP; +} + static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { @@ -637,6 +734,9 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname, if 
(ssk) return tcp_setsockopt(ssk, level, optname, optval, optlen); + if (level == SOL_IP) + return mptcp_setsockopt_v4(msk, optname, optval, optlen); + if (level == SOL_IPV6) return mptcp_setsockopt_v6(msk, optname, optval, optlen); @@ -1003,6 +1103,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk) ssk->sk_priority = sk->sk_priority; ssk->sk_bound_dev_if = sk->sk_bound_dev_if; ssk->sk_incoming_cpu = sk->sk_incoming_cpu; + __ip_sock_set_tos(ssk, inet_sk(sk)->tos); if (sk->sk_userlocks & tx_rx_locks) { ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks; @@ -1028,6 +1129,9 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk) if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops) tcp_set_congestion_control(ssk, msk->ca_name, false, true); + + inet_sk(ssk)->transparent = inet_sk(sk)->transparent; + inet_sk(ssk)->freebind = inet_sk(sk)->freebind; } static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 6172f380dfb7..b8dd3441f7d0 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1425,6 +1425,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, if (addr.ss_family == AF_INET6) addrlen = sizeof(struct sockaddr_in6); #endif + mptcp_sockopt_sync(msk, ssk); + ssk->sk_bound_dev_if = ifindex; err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen); if (err) @@ -1441,7 +1443,6 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, mptcp_info2sockaddr(remote, &addr, ssk->sk_family); mptcp_add_pending_subflow(msk, subflow); - mptcp_sockopt_sync(msk, ssk); err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK); if (err && err != -EINPROGRESS) goto failed_unlink; @@ -1534,9 +1535,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock) */ sf->sk->sk_net_refcnt = 1; get_net(net); -#ifdef CONFIG_PROC_FS - this_cpu_add(*net->core.sock_inuse, 1); -#endif + sock_inuse_add(net, 1); err = tcp_set_ulp(sf->sk, "mptcp"); release_sock(sf->sk); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 770a63103c7a..054ee9d25efe 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -189,7 +189,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); unsigned int nf_conntrack_max __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_max); seqcount_spinlock_t nf_conntrack_generation __read_mostly; -static siphash_key_t nf_conntrack_hash_rnd __read_mostly; +static siphash_aligned_key_t nf_conntrack_hash_rnd; static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, unsigned int zoneid, @@ -482,7 +482,7 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); */ u32 nf_ct_get_id(const struct nf_conn *ct) { - static __read_mostly siphash_key_t ct_id_seed; + static siphash_aligned_key_t ct_id_seed; unsigned long a, b, c, d; net_get_random_once(&ct_id_seed, sizeof(ct_id_seed)); diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index f562eeef4234..1e89b595ecd0 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_hash); unsigned int nf_ct_expect_max __read_mostly; static struct kmem_cache *nf_ct_expect_cachep __read_mostly; -static siphash_key_t nf_ct_expect_hashrnd __read_mostly; +static siphash_aligned_key_t nf_ct_expect_hashrnd; /* nf_conntrack_expect helper functions */ void nf_ct_unlink_expect_report(struct 
nf_conntrack_expect *exp, diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index c7708bde057c..849fa7f4353c 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2995,7 +2995,7 @@ static const union nf_inet_addr any_addr; static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp) { - static __read_mostly siphash_key_t exp_id_seed; + static siphash_aligned_key_t exp_id_seed; unsigned long a, b, c, d; net_get_random_once(&exp_id_seed, sizeof(exp_id_seed)); diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 4d50d51db796..ab9f6c75524d 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -34,7 +34,7 @@ static unsigned int nat_net_id __read_mostly; static struct hlist_head *nf_nat_bysource __read_mostly; static unsigned int nf_nat_htable_size __read_mostly; -static siphash_key_t nf_nat_hash_rnd __read_mostly; +static siphash_aligned_key_t nf_nat_hash_rnd; struct nf_nat_lookup_hook_priv { struct nf_hook_entries __rcu *entries; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 4c575324a985..1a19d179e913 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -707,9 +707,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, if (err < 0) goto out_module; - local_bh_disable(); sock_prot_inuse_add(net, &netlink_proto, 1); - local_bh_enable(); nlk = nlk_sk(sock->sk); nlk->module = module; @@ -809,9 +807,7 @@ static int netlink_release(struct socket *sock) netlink_table_ungrab(); } - local_bh_disable(); sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); - local_bh_enable(); call_rcu(&nlk->rcu, deferred_put_nlk_sk); return 0; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 46943a18a10d..a1ffdb48cc47 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3102,9 +3102,7 @@ static int packet_release(struct socket *sock) sk_del_node_init_rcu(sk); mutex_unlock(&net->packet.sklist_lock); - preempt_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); - preempt_enable(); spin_lock(&po->bind_lock); unregister_prot_hook(sk, false); @@ -3368,9 +3366,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, sk_add_node_tail_rcu(sk, &net->packet.sklist); mutex_unlock(&net->packet.sklist_lock); - preempt_disable(); sock_prot_inuse_add(net, &packet_proto, 1); - preempt_enable(); return 0; out2: diff --git a/net/rds/send.c b/net/rds/send.c index 53444397de66..0c5504068e3c 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -272,7 +272,7 @@ restart: /* Unfortunately, the way Infiniband deals with * RDMA to a bad MR key is by moving the entire - * queue pair to error state. We cold possibly + * queue pair to error state. We could possibly * recover from that, but right now we drop the * connection. * Therefore, we never retransmit messages with RDMA ops. 
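
The conntrack, expectation-ID and NAT hunks above all replace a __read_mostly siphash_key_t with siphash_aligned_key_t, presumably so the 128-bit key keeps the natural alignment SipHash wants rather than whatever its section placement happens to provide. A minimal sketch of the same lazily seeded keyed-hash pattern follows; the helper and its names are illustrative only, not code from this series:

#include <linux/net.h>      /* net_get_random_once() */
#include <linux/siphash.h>

static siphash_aligned_key_t example_hash_rnd;

static u32 example_hash(u64 a, u64 b)
{
	/* Seed the key exactly once, on first use. */
	net_get_random_once(&example_hash_rnd, sizeof(example_hash_rnd));

	/* Hash two 64-bit words under the 128-bit key and fold the
	 * result to 32 bits, as the conntrack hashes above do.
	 */
	return (u32)siphash_2u64(a, b, &example_hash_rnd);
}
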
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 3b0f62095803..d33804d41c5c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -434,9 +434,9 @@ unsigned long dev_trans_start(struct net_device *dev) dev = vlan_dev_real_dev(dev); else if (netif_is_macvlan(dev)) dev = macvlan_dev_real_dev(dev); - res = netdev_get_tx_queue(dev, 0)->trans_start; + res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start); for (i = 1; i < dev->num_tx_queues; i++) { - val = netdev_get_tx_queue(dev, i)->trans_start; + val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start); if (val && time_after(val, res)) res = val; } @@ -445,11 +445,62 @@ unsigned long dev_trans_start(struct net_device *dev) } EXPORT_SYMBOL(dev_trans_start); +static void netif_freeze_queues(struct net_device *dev) +{ + unsigned int i; + int cpu; + + cpu = smp_processor_id(); + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* We are the only thread of execution doing a + * freeze, but we have to grab the _xmit_lock in + * order to synchronize with threads which are in + * the ->hard_start_xmit() handler and already + * checked the frozen bit. + */ + __netif_tx_lock(txq, cpu); + set_bit(__QUEUE_STATE_FROZEN, &txq->state); + __netif_tx_unlock(txq); + } +} + +void netif_tx_lock(struct net_device *dev) +{ + spin_lock(&dev->tx_global_lock); + netif_freeze_queues(dev); +} +EXPORT_SYMBOL(netif_tx_lock); + +static void netif_unfreeze_queues(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* No need to grab the _xmit_lock here. If the + * queue is not stopped for another reason, we + * force a schedule. + */ + clear_bit(__QUEUE_STATE_FROZEN, &txq->state); + netif_schedule_queue(txq); + } +} + +void netif_tx_unlock(struct net_device *dev) +{ + netif_unfreeze_queues(dev); + spin_unlock(&dev->tx_global_lock); +} +EXPORT_SYMBOL(netif_tx_unlock); + static void dev_watchdog(struct timer_list *t) { struct net_device *dev = from_timer(dev, t, watchdog_timer); - netif_tx_lock(dev); + spin_lock(&dev->tx_global_lock); if (!qdisc_tx_is_noop(dev)) { if (netif_device_present(dev) && netif_running(dev) && @@ -462,21 +513,23 @@ static void dev_watchdog(struct timer_list *t) struct netdev_queue *txq; txq = netdev_get_tx_queue(dev, i); - trans_start = txq->trans_start; + trans_start = READ_ONCE(txq->trans_start); if (netif_xmit_stopped(txq) && time_after(jiffies, (trans_start + dev->watchdog_timeo))) { some_queue_timedout = 1; - txq->trans_timeout++; + atomic_long_inc(&txq->trans_timeout); break; } } - if (some_queue_timedout) { + if (unlikely(some_queue_timedout)) { trace_net_dev_xmit_timeout(dev, i); WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n", dev->name, netdev_drivername(dev), i); + netif_freeze_queues(dev); dev->netdev_ops->ndo_tx_timeout(dev, i); + netif_unfreeze_queues(dev); } if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + @@ -484,7 +537,7 @@ static void dev_watchdog(struct timer_list *t) dev_hold(dev); } } - netif_tx_unlock(dev); + spin_unlock(&dev->tx_global_lock); dev_put(dev); } @@ -1148,7 +1201,7 @@ static void transition_one_qdisc(struct net_device *dev, rcu_assign_pointer(dev_queue->qdisc, new_qdisc); if (need_watchdog_p) { - dev_queue->trans_start = 0; + WRITE_ONCE(dev_queue->trans_start, 0); *need_watchdog_p = 1; } } diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 
ecbb10db1111..ed4ccef5d6a8 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -208,17 +208,17 @@ static bool loss_4state(struct netem_sched_data *q) * next state and if the next packet has to be transmitted or lost. * The four states correspond to: * TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period - * LOST_IN_BURST_PERIOD => isolated losses within a gap period - * LOST_IN_GAP_PERIOD => lost packets within a burst period - * TX_IN_GAP_PERIOD => successfully transmitted packets within a burst period + * LOST_IN_GAP_PERIOD => isolated losses within a gap period + * LOST_IN_BURST_PERIOD => lost packets within a burst period + * TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period */ switch (clg->state) { case TX_IN_GAP_PERIOD: if (rnd < clg->a4) { - clg->state = LOST_IN_BURST_PERIOD; + clg->state = LOST_IN_GAP_PERIOD; return true; } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { - clg->state = LOST_IN_GAP_PERIOD; + clg->state = LOST_IN_BURST_PERIOD; return true; } else if (clg->a1 + clg->a4 < rnd) { clg->state = TX_IN_GAP_PERIOD; @@ -227,24 +227,24 @@ static bool loss_4state(struct netem_sched_data *q) break; case TX_IN_BURST_PERIOD: if (rnd < clg->a5) { - clg->state = LOST_IN_GAP_PERIOD; + clg->state = LOST_IN_BURST_PERIOD; return true; } else { clg->state = TX_IN_BURST_PERIOD; } break; - case LOST_IN_GAP_PERIOD: + case LOST_IN_BURST_PERIOD: if (rnd < clg->a3) clg->state = TX_IN_BURST_PERIOD; else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { clg->state = TX_IN_GAP_PERIOD; } else if (clg->a2 + clg->a3 < rnd) { - clg->state = LOST_IN_GAP_PERIOD; + clg->state = LOST_IN_BURST_PERIOD; return true; } break; - case LOST_IN_BURST_PERIOD: + case LOST_IN_GAP_PERIOD: clg->state = TX_IN_GAP_PERIOD; break; } diff --git a/net/sctp/output.c b/net/sctp/output.c index cdfdbd353c67..72fe6669c50d 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -134,7 +134,7 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, dst_hold(tp->dst); sk_setup_caps(sk, tp->dst); } - packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size + packet->max_size = sk_can_gso(sk) ? 
READ_ONCE(tp->dst->dev->gso_max_size) : asoc->pathmtu; rcu_read_unlock(); } diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index ff47091c385e..a18609f608fb 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -547,6 +547,9 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, sctp_assoc_update_retran_path(transport->asoc); transport->asoc->rtx_data_chunks += transport->asoc->unack_data; + if (transport->pl.state == SCTP_PL_COMPLETE && + transport->asoc->unack_data) + sctp_transport_reset_probe_timer(transport); break; case SCTP_RTXR_FAST_RTX: SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 354c1c4de19b..cc544a97c4af 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1124,12 +1124,11 @@ enum sctp_disposition sctp_sf_send_probe(struct net *net, if (!sctp_transport_pl_enabled(transport)) return SCTP_DISPOSITION_CONSUME; - if (sctp_transport_pl_send(transport)) { - reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size); - if (!reply) - return SCTP_DISPOSITION_NOMEM; - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); - } + sctp_transport_pl_send(transport); + reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size); + if (!reply) + return SCTP_DISPOSITION_NOMEM; + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE, SCTP_TRANSPORT(transport)); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 33391254fa82..055a6d3ec6e2 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5068,12 +5068,9 @@ static int sctp_init_sock(struct sock *sk) SCTP_DBG_OBJCNT_INC(sock); - local_bh_disable(); sk_sockets_allocated_inc(sk); sock_prot_inuse_add(net, sk->sk_prot, 1); - local_bh_enable(); - return 0; } @@ -5099,10 +5096,8 @@ static void sctp_destroy_sock(struct sock *sk) list_del(&sp->auto_asconf_list); } sctp_endpoint_free(sp->ep); - local_bh_disable(); sk_sockets_allocated_dec(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - local_bh_enable(); } /* Triggered when there are no references on the socket anymore */ diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 133f1719bf1b..f8fd98784977 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -213,13 +213,18 @@ void sctp_transport_reset_reconf_timer(struct sctp_transport *transport) void sctp_transport_reset_probe_timer(struct sctp_transport *transport) { - if (timer_pending(&transport->probe_timer)) - return; if (!mod_timer(&transport->probe_timer, jiffies + transport->probe_interval)) sctp_transport_hold(transport); } +void sctp_transport_reset_raise_timer(struct sctp_transport *transport) +{ + if (!mod_timer(&transport->probe_timer, + jiffies + transport->probe_interval * 30)) + sctp_transport_hold(transport); +} + /* This transport has been assigned to an association. * Initialize fields from the association or from the sock itself. * Register the reference count in the association. 
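
The transport.c hunk above introduces sctp_transport_reset_raise_timer(), which re-arms the probe timer at 30 probe intervals once PLPMTUD reaches Search Complete; the hunks that follow drop the old raise_count/last_rtx_chunks bookkeeping in favour of that timer. A rough sketch of the arm/expire reference pattern the helper relies on (handler name and body are illustrative, not the actual SCTP timer code):

/* Illustrative only: mod_timer() returns 0 when the timer was not already
 * pending, so a transport reference is taken once per outstanding timer
 * and dropped again when the timer fires or is cancelled.
 */
static void example_arm_probe(struct sctp_transport *t, unsigned long delay)
{
	if (!mod_timer(&t->probe_timer, jiffies + delay))
		sctp_transport_hold(t);	/* keep t alive while the timer is queued */
}

static void example_probe_expired(struct timer_list *timer)
{
	struct sctp_transport *t = from_timer(t, timer, probe_timer);

	/* ... queue the next probe or raise probe_size here ... */

	sctp_transport_put(t);		/* drop the reference taken when arming */
}
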
@@ -258,12 +263,11 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) sctp_transport_pl_update(transport); } -bool sctp_transport_pl_send(struct sctp_transport *t) +void sctp_transport_pl_send(struct sctp_transport *t) { if (t->pl.probe_count < SCTP_MAX_PROBES) goto out; - t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks; t->pl.probe_count = 0; if (t->pl.state == SCTP_PL_BASE) { if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */ @@ -298,17 +302,9 @@ bool sctp_transport_pl_send(struct sctp_transport *t) } out: - if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 && - !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) { - t->pl.raise_count++; - return false; - } - pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); - t->pl.probe_count++; - return true; } bool sctp_transport_pl_recv(struct sctp_transport *t) @@ -316,7 +312,6 @@ bool sctp_transport_pl_recv(struct sctp_transport *t) pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n", __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high); - t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks; t->pl.pmtu = t->pl.probe_size; t->pl.probe_count = 0; if (t->pl.state == SCTP_PL_BASE) { @@ -338,14 +333,14 @@ bool sctp_transport_pl_recv(struct sctp_transport *t) t->pl.probe_size += SCTP_PL_MIN_STEP; if (t->pl.probe_size >= t->pl.probe_high) { t->pl.probe_high = 0; - t->pl.raise_count = 0; t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */ t->pl.probe_size = t->pl.pmtu; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_assoc_sync_pmtu(t->asoc); + sctp_transport_reset_raise_timer(t); } - } else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) { + } else if (t->pl.state == SCTP_PL_COMPLETE) { /* Raise probe_size again after 30 * interval in Search Complete */ t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */ t->pl.probe_size += SCTP_PL_MIN_STEP; @@ -393,6 +388,7 @@ static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu) t->pl.probe_high = 0; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + sctp_transport_reset_probe_timer(t); return true; } } diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 230072f9ec48..67a78bbf305f 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -89,8 +89,8 @@ int smc_hash_sk(struct sock *sk) write_lock_bh(&h->lock); sk_add_node(sk, head); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&h->lock); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return 0; } diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index b4d9419a015b..81116312b753 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -761,21 +761,10 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, skb_tailroom(skb), tailen); } - if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) { - nsg = 1; - trailer = skb; - } else { - /* TODO: We could avoid skb_cow_data() if skb has no frag_list - * e.g. by skb_fill_page_desc() to add another page to the skb - * with the wanted tailen... However, page skbs look not often, - * so take it easy now! - * Cloned skbs e.g. 
from link_xmit() seems no choice though :( - */ - nsg = skb_cow_data(skb, tailen, &trailer); - if (unlikely(nsg < 0)) { - pr_err("TX: skb_cow_data() returned %d\n", nsg); - return nsg; - } + nsg = skb_cow_data(skb, tailen, &trailer); + if (unlikely(nsg < 0)) { + pr_err("TX: skb_cow_data() returned %d\n", nsg); + return nsg; } pskb_put(skb, trailer, tailen); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b0bfc78e421c..b9ff1f31f3af 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -522,9 +522,7 @@ static void unix_sock_destructor(struct sock *sk) unix_release_addr(u->addr); atomic_long_dec(&unix_nr_socks); - local_bh_disable(); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - local_bh_enable(); #ifdef UNIX_REFCNT_DEBUG pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk, atomic_long_read(&unix_nr_socks)); @@ -889,9 +887,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, memset(&u->scm_stat, 0, sizeof(struct scm_stat)); unix_insert_socket(unix_sockets_unbound(sk), sk); - local_bh_disable(); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); - local_bh_enable(); return sk; diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index f16074eb53c7..28ef3f4465ae 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -794,9 +794,7 @@ static int xsk_release(struct socket *sock) sk_del_node_init_rcu(sk); mutex_unlock(&net->xdp.lock); - local_bh_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); - local_bh_enable(); xsk_delete_from_maps(xs); mutex_lock(&xs->mutex); @@ -1396,9 +1394,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, sk_add_node_rcu(sk, &net->xdp.list); mutex_unlock(&net->xdp.lock); - local_bh_disable(); sock_prot_inuse_add(net, &xsk_proto, 1); - local_bh_enable(); return 0; } |
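
Across af_smc, af_netlink, af_packet, sctp, af_unix and xsk, this series drops the local_bh_disable()/preempt_disable() pairs around sock_prot_inuse_add() and sock_inuse_add(). The usual justification for such a cleanup is that the counter update is a single this_cpu_add()-style operation, which is already safe against preemption and softirqs on the local CPU. A minimal sketch of that pattern, with illustrative names rather than the actual sock_prot_inuse implementation:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, example_prot_inuse);

static inline void example_prot_inuse_add(int val)
{
	/* One per-CPU read-modify-write; callers no longer need to wrap
	 * it in local_bh_disable()/preempt_disable().
	 */
	this_cpu_add(example_prot_inuse, val);
}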