Lines matching defs:hdev — uses of struct hci_dev *hdev in net/bluetooth/hci_request.c (the leading numbers are source-file line numbers)

37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
40 req->hdev = hdev;
49 bool hci_req_status_pend(struct hci_dev *hdev)
51 return hdev->req_status == HCI_REQ_PEND;
57 struct hci_dev *hdev = req->hdev;
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
87 queue_work(hdev->workqueue, &hdev->cmd_work);
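Taken together, lines 37-87 implement the asynchronous request path: commands are queued on a private req->cmd_q, spliced onto hdev->cmd_q under the queue lock, and the command work is kicked. A minimal usage sketch of that pattern (the opcode and completion callback here are illustrative, not from the matched lines):

        struct hci_request req;
        int err;

        hci_req_init(&req, hdev);
        /* queue one or more commands on req.cmd_q */
        hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
        /* splice onto hdev->cmd_q and schedule hdev->cmd_work */
        err = hci_req_run(&req, my_complete_cb); /* my_complete_cb: hypothetical hci_req_complete_t */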
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
111 kfree_skb(hdev->req_skb);
112 hdev->req_skb = skb_get(skb);
114 wake_up_interruptible(&hdev->req_wait_q);
118 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
120 BT_DBG("%s err 0x%2.2x", hdev->name, err);
122 if (hdev->req_status == HCI_REQ_PEND) {
123 hdev->req_result = err;
124 hdev->req_status = HCI_REQ_CANCELED;
125 wake_up_interruptible(&hdev->req_wait_q);
129 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
136 BT_DBG("%s", hdev->name);
138 hci_req_init(&req, hdev);
142 hdev->req_status = HCI_REQ_PEND;
148 err = wait_event_interruptible_timeout(hdev->req_wait_q,
149 hdev->req_status != HCI_REQ_PEND, timeout);
154 switch (hdev->req_status) {
156 err = -bt_to_errno(hdev->req_result);
160 err = -hdev->req_result;
168 hdev->req_status = hdev->req_result = 0;
169 skb = hdev->req_skb;
170 hdev->req_skb = NULL;
172 BT_DBG("%s end: err %d", hdev->name, err);
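Lines 129-172 are the synchronous command helper: the caller sleeps on req_wait_q until hci_req_sync_complete() flips req_status, then collects req_skb. A hedged usage sketch (error handling abbreviated; callers normally hold the request sync lock):

        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);   /* e.g. -ETIMEDOUT or -bt_to_errno(result) */

        /* skb->data carries the command-complete parameters */
        kfree_skb(skb);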
186 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
189 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
194 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
201 BT_DBG("%s start", hdev->name);
203 hci_req_init(&req, hdev);
205 hdev->req_status = HCI_REQ_PEND;
216 hdev->req_status = 0;
235 err = wait_event_interruptible_timeout(hdev->req_wait_q,
236 hdev->req_status != HCI_REQ_PEND, timeout);
241 switch (hdev->req_status) {
243 err = -bt_to_errno(hdev->req_result);
245 *hci_status = hdev->req_result;
249 err = -hdev->req_result;
261 kfree_skb(hdev->req_skb);
262 hdev->req_skb = NULL;
263 hdev->req_status = hdev->req_result = 0;
265 BT_DBG("%s end: err %d", hdev->name, err);
270 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
277 hci_req_sync_lock(hdev);
282 if (test_bit(HCI_UP, &hdev->flags))
283 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
286 hci_req_sync_unlock(hdev);
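hci_req_sync() (lines 270-286) serializes whole request sequences: it takes the sync lock, runs __hci_req_sync() only while the device is HCI_UP, and unlocks. Its func argument is a request-builder callback; a hypothetical builder might look like this:

        /* hypothetical builder passed as the req callback */
        static int read_voice_setting(struct hci_request *req,
                                      unsigned long opt)
        {
                hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
                return 0;
        }

        u8 status;
        int err = hci_req_sync(hdev, read_voice_setting, 0,
                               HCI_CMD_TIMEOUT, &status);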
291 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
321 struct hci_dev *hdev = req->hdev;
324 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
332 skb = hci_prepare_cmd(hdev, opcode, plen, param);
334 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
356 struct hci_dev *hdev = req->hdev;
360 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
363 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
372 type = hdev->def_page_scan_type;
373 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
376 acp.window = cpu_to_le16(hdev->def_page_scan_window);
378 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 __cpu_to_le16(hdev->page_scan_window) != acp.window)
383 if (hdev->page_scan_type != type)
387 static void start_interleave_scan(struct hci_dev *hdev)
389 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
390 queue_delayed_work(hdev->req_workqueue,
391 &hdev->interleave_scan, 0);
394 static bool is_interleave_scanning(struct hci_dev *hdev)
396 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
399 static void cancel_interleave_scan(struct hci_dev *hdev)
401 bt_dev_dbg(hdev, "cancelling interleave scan");
403 cancel_delayed_work_sync(&hdev->interleave_scan);
405 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
411 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
417 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
418 !(list_empty(&hdev->pend_le_conns) &&
419 list_empty(&hdev->pend_le_reports));
420 bool is_interleaving = is_interleave_scanning(hdev);
423 start_interleave_scan(hdev);
424 bt_dev_dbg(hdev, "starting interleave scan");
429 cancel_interleave_scan(hdev);
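Reassembled from the fragments above, the decision in __hci_update_interleaved_scan() (lines 411-429) reduces roughly to: interleave only while an advertisement monitor is active and some passive-scan user (a pending LE connection or report) also exists:

        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports));

        if (use_interleaving && !is_interleave_scanning(hdev))
                start_interleave_scan(hdev);
        else if (!use_interleaving && is_interleave_scanning(hdev))
                cancel_interleave_scan(hdev);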
434 /* This function controls the background scanning based on hdev->pend_le_conns
438 * This function requires the caller holds hdev->lock.
442 struct hci_dev *hdev = req->hdev;
444 if (!test_bit(HCI_UP, &hdev->flags) ||
445 test_bit(HCI_INIT, &hdev->flags) ||
446 hci_dev_test_flag(hdev, HCI_SETUP) ||
447 hci_dev_test_flag(hdev, HCI_CONFIG) ||
448 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
449 hci_dev_test_flag(hdev, HCI_UNREGISTER))
453 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
457 if (hdev->discovery.state != DISCOVERY_STOPPED)
467 hci_discovery_filter_clear(hdev);
469 BT_DBG("%s ADV monitoring is %s", hdev->name,
470 hci_is_adv_monitoring(hdev) ? "on" : "off");
472 if (list_empty(&hdev->pend_le_conns) &&
473 list_empty(&hdev->pend_le_reports) &&
474 !hci_is_adv_monitoring(hdev)) {
481 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
486 BT_DBG("%s stopping background scanning", hdev->name);
496 if (hci_lookup_le_connect(hdev))
502 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
506 bt_dev_dbg(hdev, "starting background scanning");
512 struct hci_dev *hdev = req->hdev;
515 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
522 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
530 list_for_each_entry(uuid, &hdev->uuids, list) {
564 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
572 list_for_each_entry(uuid, &hdev->uuids, list) {
597 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
605 list_for_each_entry(uuid, &hdev->uuids, list) {
630 static void create_eir(struct hci_dev *hdev, u8 *data)
635 name_len = strlen(hdev->dev_name);
648 memcpy(ptr + 2, hdev->dev_name, name_len);
653 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
656 ptr[2] = (u8) hdev->inq_tx_power;
661 if (hdev->devid_source > 0) {
665 put_unaligned_le16(hdev->devid_source, ptr + 2);
666 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
667 put_unaligned_le16(hdev->devid_product, ptr + 6);
668 put_unaligned_le16(hdev->devid_version, ptr + 8);
673 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
674 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
675 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
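The create_uuid*_list()/create_eir() helpers (lines 522-675) all emit the standard EIR/AD TLV layout: a length octet counting the type octet plus payload, then the type, then the data. The name field written around line 648 has this shape (values illustrative):

        /* "BlueZ" as a complete local name: 06 09 'B' 'l' 'u' 'e' 'Z' */
        ptr[0] = name_len + 1;        /* length = type octet + payload */
        ptr[1] = EIR_NAME_COMPLETE;   /* 0x09 */
        memcpy(ptr + 2, hdev->dev_name, name_len);
        ptr += name_len + 2;          /* advance past the whole field */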
680 struct hci_dev *hdev = req->hdev;
683 if (!hdev_is_powered(hdev))
686 if (!lmp_ext_inq_capable(hdev))
689 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
692 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
697 create_eir(hdev, cp.data);
699 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
702 memcpy(hdev->eir, cp.data, sizeof(cp.data));
709 struct hci_dev *hdev = req->hdev;
711 if (hdev->scanning_paused) {
712 bt_dev_dbg(hdev, "Scanning is paused for suspend");
716 if (use_ext_scan(hdev)) {
732 if (use_ll_privacy(hdev) &&
733 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
734 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
749 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
753 if (use_ll_privacy(req->hdev) &&
754 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
757 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
776 struct hci_dev *hdev = req->hdev;
779 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
784 if (*num_entries >= hdev->le_accept_list_size)
789 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
790 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
795 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
803 bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
807 if (use_ll_privacy(hdev) &&
808 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
811 irk = hci_find_irk_by_addr(hdev, &params->addr,
820 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
821 memcpy(cp.local_irk, hdev->irk, 16);
835 struct hci_dev *hdev = req->hdev;
845 bool allow_rpa = hdev->suspended;
847 if (use_ll_privacy(hdev) &&
848 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
857 list_for_each_entry(b, &hdev->le_accept_list, list) {
858 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
861 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
875 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
876 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
893 list_for_each_entry(params, &hdev->pend_le_conns, action) {
902 list_for_each_entry(params, &hdev->pend_le_reports, action) {
916 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
917 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
924 static bool scan_use_rpa(struct hci_dev *hdev)
926 return hci_dev_test_flag(hdev, HCI_PRIVACY);
933 struct hci_dev *hdev = req->hdev;
935 if (hdev->scanning_paused) {
936 bt_dev_dbg(hdev, "Scanning is paused for suspend");
940 if (use_ll_privacy(hdev) &&
941 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
951 if (use_ext_scan(hdev)) {
967 if (scan_1m(hdev) || scan_2m(hdev)) {
979 if (scan_coded(hdev)) {
1022 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1024 struct hci_conn_hash *h = &hdev->conn_hash;
1048 struct hci_dev *hdev = req->hdev;
1055 if (hdev->scanning_paused) {
1056 bt_dev_dbg(hdev, "Scanning is paused for suspend");
1066 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1070 if (__hci_update_interleaved_scan(hdev))
1073 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1089 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1090 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1093 if (hdev->suspended) {
1094 window = hdev->le_scan_window_suspend;
1095 interval = hdev->le_scan_int_suspend;
1096 } else if (hci_is_le_conn_scanning(hdev)) {
1097 window = hdev->le_scan_window_connect;
1098 interval = hdev->le_scan_int_connect;
1099 } else if (hci_is_adv_monitoring(hdev)) {
1100 window = hdev->le_scan_window_adv_monitor;
1101 interval = hdev->le_scan_int_adv_monitor;
1103 window = hdev->le_scan_window;
1104 interval = hdev->le_scan_interval;
1107 bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
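Lines 1093-1104 pick the passive-scan duty cycle by precedence; summarized (the right-hand names are the hdev fields matched above, whose values come from mgmt or debugfs):

        suspended                    -> le_scan_window_suspend / le_scan_int_suspend
        LE connect attempt pending   -> le_scan_window_connect / le_scan_int_connect
        advertisement monitor active -> le_scan_window_adv_monitor / le_scan_int_adv_monitor
        otherwise                    -> le_scan_window / le_scan_interval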
1113 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
1121 adv_instance = hci_find_adv_instance(hdev, instance);
1150 struct hci_dev *hdev = req->hdev;
1156 list_for_each_entry(b, &hdev->accept_list, list) {
1167 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1178 if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1185 set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1188 static void cancel_adv_timeout(struct hci_dev *hdev)
1190 if (hdev->adv_instance_timeout) {
1191 hdev->adv_instance_timeout = 0;
1192 cancel_delayed_work(&hdev->adv_instance_expire);
1196 /* This function requires the caller holds hdev->lock */
1199 bt_dev_dbg(req->hdev, "Suspending advertising instances");
1207 if (!ext_adv_capable(req->hdev))
1208 cancel_adv_timeout(req->hdev);
1211 /* This function requires the caller holds hdev->lock */
1216 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1218 if (ext_adv_capable(req->hdev)) {
1220 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1230 req->hdev->cur_adv_instance,
1235 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1237 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1239 if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1240 test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1241 wake_up(&hdev->suspend_wait_q);
1246 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1254 if (next == hdev->suspend_state) {
1255 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1259 hdev->suspend_state = next;
1260 hci_req_init(&req, hdev);
1264 hdev->suspended = true;
1267 old_state = hdev->discovery.state;
1269 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1270 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1271 queue_work(hdev->req_workqueue, &hdev->discov_update);
1274 hdev->discovery_paused = true;
1275 hdev->discovery_old_state = old_state;
1278 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1280 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1281 cancel_delayed_work(&hdev->discov_off);
1282 queue_delayed_work(hdev->req_workqueue,
1283 &hdev->discov_off, 0);
1287 if (hdev->adv_instance_cnt)
1290 hdev->advertising_paused = true;
1291 hdev->advertising_old_state = old_state;
1297 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1301 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1304 hdev->scanning_paused = true;
1311 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1317 bt_dev_dbg(hdev,
1320 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1324 hdev->scanning_paused = false;
1330 hdev->scanning_paused = true;
1333 hdev->suspended = false;
1334 hdev->scanning_paused = false;
1341 hdev->advertising_paused = false;
1342 if (hdev->advertising_old_state) {
1344 hdev->suspend_tasks);
1345 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1346 queue_work(hdev->req_workqueue,
1347 &hdev->discoverable_update);
1348 hdev->advertising_old_state = 0;
1352 if (hdev->adv_instance_cnt)
1356 hdev->discovery_paused = false;
1357 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1358 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1359 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1360 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1361 queue_work(hdev->req_workqueue, &hdev->discov_update);
1367 hdev->suspend_state = next;
1370 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1371 wake_up(&hdev->suspend_wait_q);
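The suspend path (lines 1246-1371) coordinates with the PM notifier through hdev->suspend_tasks: each step that must finish asynchronously sets a bit, and completion paths such as suspend_req_complete() above clear it and wake suspend_wait_q. The waiting side presumably looks like this sketch (the timeout constant is an assumption, not taken from the matched lines):

        set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
        /* ... queue the request, then block the notifier ... */
        wait_event_timeout(hdev->suspend_wait_q,
                           !test_bit(SUSPEND_SCAN_DISABLE,
                                     hdev->suspend_tasks),
                           msecs_to_jiffies(2000) /* assumed timeout */);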
1374 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1376 u8 instance = hdev->cur_adv_instance;
1383 adv_instance = hci_find_adv_instance(hdev, instance);
1395 if (ext_adv_capable(req->hdev)) {
1405 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1419 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1422 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1424 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1430 adv_instance = hci_find_adv_instance(hdev, instance);
1439 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1442 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1446 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1453 hci_dev_test_flag(hdev, HCI_BONDABLE))
1462 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1465 if (hci_conn_num(hdev, LE_LINK) == 0)
1469 if (hdev->conn_hash.le_num_slave > 0) {
1471 if (!connectable && !(hdev->le_states[2] & 0x10))
1477 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1478 !(hdev->le_states[2] & 0x20)))
1483 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1485 if (!connectable && !(hdev->le_states[2] & 0x02))
1491 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1492 !(hdev->le_states[2] & 0x08)))
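The open-coded masks in is_advertising_allowed() (lines 1462-1492) index the 64-bit LE supported-states bitmask the controller reports via HCI LE Read Supported States; for example, le_states[2] & 0x10 tests overall bit 20. A hypothetical helper makes the convention explicit:

        /* hypothetical: test bit 'bit' of the 8-byte LE states mask */
        static bool le_state_supported(struct hci_dev *hdev, unsigned int bit)
        {
                return hdev->le_states[bit / 8] & (1 << (bit % 8));
        }

        /* le_states[2] & 0x10 is equivalent to le_state_supported(hdev, 20) */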
1501 struct hci_dev *hdev = req->hdev;
1508 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1514 mgmt_get_connectable(hdev);
1516 if (!is_advertising_allowed(hdev, connectable))
1519 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1527 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1534 adv_use_rpa(hdev, flags),
1543 adv_min_interval = hdev->le_adv_min_interval;
1544 adv_max_interval = hdev->le_adv_max_interval;
1546 if (get_cur_adv_instance_scan_rsp_len(hdev))
1551 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1552 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1556 adv_min_interval = hdev->le_adv_min_interval;
1557 adv_max_interval = hdev->le_adv_max_interval;
1564 cp.channel_map = hdev->le_adv_channel_map;
1571 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1581 complete_len = strlen(hdev->dev_name);
1584 hdev->dev_name, complete_len + 1);
1587 short_len = strlen(hdev->short_name);
1590 hdev->short_name, short_len + 1);
1598 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1608 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1610 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1613 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1617 if (hdev->appearance) {
1618 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1621 return append_local_name(hdev, ptr, scan_rsp_len);
1624 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1631 adv_instance = hci_find_adv_instance(hdev, instance);
1637 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1638 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1647 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1654 struct hci_dev *hdev = req->hdev;
1657 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1660 if (ext_adv_capable(hdev)) {
1669 len = create_instance_scan_rsp_data(hdev, instance,
1672 len = create_default_scan_rsp_data(hdev, pdu.data);
1674 if (hdev->scan_rsp_data_len == len &&
1675 !memcmp(pdu.data, hdev->scan_rsp_data, len))
1678 memcpy(hdev->scan_rsp_data, pdu.data, len);
1679 hdev->scan_rsp_data_len = len;
1694 len = create_instance_scan_rsp_data(hdev, instance,
1697 len = create_default_scan_rsp_data(hdev, cp.data);
1699 if (hdev->scan_rsp_data_len == len &&
1700 !memcmp(cp.data, hdev->scan_rsp_data, len))
1703 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1704 hdev->scan_rsp_data_len = len;
1712 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1720 adv_instance = hci_find_adv_instance(hdev, instance);
1725 instance_flags = get_adv_instance_flags(hdev, instance);
1744 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1752 flags |= mgmt_get_adv_discov_flags(hdev);
1778 if (ext_adv_capable(hdev)) {
1782 adv_tx_power = hdev->adv_tx_power;
1784 adv_tx_power = hdev->adv_tx_power;
1803 struct hci_dev *hdev = req->hdev;
1806 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1809 if (ext_adv_capable(hdev)) {
1817 len = create_instance_adv_data(hdev, instance, pdu.data);
1820 if (hdev->adv_data_len == len &&
1821 memcmp(pdu.data, hdev->adv_data, len) == 0)
1824 memcpy(hdev->adv_data, pdu.data, len);
1825 hdev->adv_data_len = len;
1839 len = create_instance_adv_data(hdev, instance, cp.data);
1842 if (hdev->adv_data_len == len &&
1843 memcmp(cp.data, hdev->adv_data, len) == 0)
1846 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1847 hdev->adv_data_len = len;
1855 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1859 hci_req_init(&req, hdev);
1865 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1868 BT_DBG("%s status %u", hdev->name, status);
1871 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1876 if (!use_ll_privacy(hdev) &&
1877 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1880 hci_req_init(&req, hdev);
1887 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1889 BT_DBG("%s status %u", hdev->name, status);
1892 void hci_req_reenable_advertising(struct hci_dev *hdev)
1896 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1897 list_empty(&hdev->adv_instances))
1900 hci_req_init(&req, hdev);
1902 if (hdev->cur_adv_instance) {
1903 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1906 if (ext_adv_capable(hdev)) {
1920 struct hci_dev *hdev = container_of(work, struct hci_dev,
1926 BT_DBG("%s", hdev->name);
1928 hci_dev_lock(hdev);
1930 hdev->adv_instance_timeout = 0;
1932 instance = hdev->cur_adv_instance;
1936 hci_req_init(&req, hdev);
1938 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1940 if (list_empty(&hdev->adv_instances))
1946 hci_dev_unlock(hdev);
1952 struct hci_dev *hdev = req->hdev;
1955 hci_dev_lock(hdev);
1957 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1961 switch (hdev->interleave_scan_state) {
1963 bt_dev_dbg(hdev, "next state: allowlist");
1964 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1967 bt_dev_dbg(hdev, "next state: no filter");
1968 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1975 hci_dev_unlock(hdev);
1982 struct hci_dev *hdev = container_of(work, struct hci_dev,
1987 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1988 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1989 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1990 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1992 bt_dev_err(hdev, "unexpected error");
1996 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2000 if (is_interleave_scanning(hdev))
2001 queue_delayed_work(hdev->req_workqueue,
2002 &hdev->interleave_scan, timeout);
2005 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2022 if (use_ll_privacy(hdev))
2029 !bacmp(&adv_instance->random_addr, &hdev->rpa))
2034 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2035 !bacmp(&hdev->random_addr, &hdev->rpa))
2039 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2041 bt_dev_err(hdev, "failed to generate new RPA");
2045 bacpy(rand_addr, &hdev->rpa);
2047 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2049 queue_delayed_work(hdev->workqueue,
2052 queue_delayed_work(hdev->workqueue,
2053 &hdev->rpa_expired, to);
2076 if (bacmp(&hdev->bdaddr, &nrpa))
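Lines 2022-2053 (echoed at 2486-2508) show the resolvable-private-address lifecycle: once HCI_RPA_EXPIRED is observed, a fresh RPA is derived from the local IRK via smp_generate_rpa(), and the rpa_expired delayed work is re-armed for hdev->rpa_timeout seconds. Reassembled roughly (the surrounding control flow is condensed):

        if (hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) ||
            bacmp(&hdev->random_addr, &hdev->rpa)) {
                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        bt_dev_err(hdev, "failed to generate new RPA");
                        return err;
                }

                bacpy(rand_addr, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
        }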
2100 struct hci_dev *hdev = req->hdev;
2110 adv_instance = hci_find_adv_instance(hdev, instance);
2117 flags = get_adv_instance_flags(hdev, instance);
2123 mgmt_get_connectable(hdev);
2125 if (!is_advertising_allowed(hdev, connectable))
2132 err = hci_get_random_address(hdev, !connectable,
2133 adv_use_rpa(hdev, flags), adv_instance,
2141 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2142 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2151 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
2164 cp.channel_map = hdev->le_adv_channel_map;
2191 if (!bacmp(&random_addr, &hdev->random_addr))
2210 struct hci_dev *hdev = req->hdev;
2217 adv_instance = hci_find_adv_instance(hdev, instance);
2255 struct hci_dev *hdev = req->hdev;
2262 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2284 struct hci_dev *hdev = req->hdev;
2287 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2297 struct hci_dev *hdev = req->hdev;
2298 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2320 struct hci_dev *hdev = req->hdev;
2324 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2325 list_empty(&hdev->adv_instances))
2328 if (hdev->adv_instance_timeout)
2331 adv_instance = hci_find_adv_instance(hdev, instance);
2357 if (!ext_adv_capable(hdev)) {
2358 hdev->adv_instance_timeout = timeout;
2359 queue_delayed_work(hdev->req_workqueue,
2360 &hdev->adv_instance_expire,
2368 if (!force && hdev->cur_adv_instance == instance &&
2369 hci_dev_test_flag(hdev, HCI_LE_ADV))
2372 hdev->cur_adv_instance = instance;
2373 if (ext_adv_capable(hdev)) {
2395 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2404 if (!instance || hdev->cur_adv_instance == instance)
2405 cancel_adv_timeout(hdev);
2411 if (instance && hdev->cur_adv_instance == instance)
2412 next_instance = hci_get_next_instance(hdev, instance);
2415 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2421 err = hci_remove_adv_instance(hdev, rem_inst);
2423 mgmt_advertising_removed(sk, hdev, rem_inst);
2426 adv_instance = hci_find_adv_instance(hdev, instance);
2435 err = hci_remove_adv_instance(hdev, instance);
2437 mgmt_advertising_removed(sk, hdev, instance);
2441 if (!req || !hdev_is_powered(hdev) ||
2442 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2445 if (next_instance && !ext_adv_capable(hdev))
2452 struct hci_dev *hdev = req->hdev;
2464 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2465 hci_lookup_le_connect(hdev)) {
2467 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2477 struct hci_dev *hdev = req->hdev;
2490 if (use_ll_privacy(hdev))
2495 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2496 !bacmp(&hdev->random_addr, &hdev->rpa))
2499 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2501 bt_dev_err(hdev, "failed to generate new RPA");
2505 set_random_addr(req, &hdev->rpa);
2507 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2508 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2531 if (bacmp(&hdev->bdaddr, &nrpa))
2549 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2550 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2551 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2552 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2554 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2556 &hdev->static_addr);
2568 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2572 list_for_each_entry(b, &hdev->accept_list, list) {
2575 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2588 struct hci_dev *hdev = req->hdev;
2591 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2594 if (!hdev_is_powered(hdev))
2597 if (mgmt_powering_down(hdev))
2600 if (hdev->scanning_paused)
2603 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2604 disconnected_accept_list_entries(hdev))
2609 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2612 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2613 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2621 hci_dev_lock(req->hdev);
2623 hci_dev_unlock(req->hdev);
2629 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2631 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2636 struct hci_dev *hdev = req->hdev;
2638 hci_dev_lock(hdev);
2646 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2647 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2650 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2651 !list_empty(&hdev->adv_instances)) {
2652 if (ext_adv_capable(hdev))
2653 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2660 hci_dev_unlock(hdev);
2667 struct hci_dev *hdev = container_of(work, struct hci_dev,
2671 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2672 mgmt_set_connectable_complete(hdev, status);
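Lines 2629-2672 (and the discoverable and background-scan counterparts below) repeat one pattern: a work item drives the state change synchronously and forwards the resulting HCI status to mgmt. Reassembled from the matches above:

        static void connectable_update_work(struct work_struct *work)
        {
                struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                    connectable_update);
                u8 status;

                hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT,
                             &status);
                mgmt_set_connectable_complete(hdev, status);
        }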
2675 static u8 get_service_classes(struct hci_dev *hdev)
2680 list_for_each_entry(uuid, &hdev->uuids, list)
2688 struct hci_dev *hdev = req->hdev;
2691 BT_DBG("%s", hdev->name);
2693 if (!hdev_is_powered(hdev))
2696 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2699 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2702 cod[0] = hdev->minor_class;
2703 cod[1] = hdev->major_class;
2704 cod[2] = get_service_classes(hdev);
2706 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2709 if (memcmp(cod, hdev->dev_class, 3) == 0)
2717 struct hci_dev *hdev = req->hdev;
2720 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2723 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2725 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2746 struct hci_dev *hdev = req->hdev;
2748 hci_dev_lock(hdev);
2750 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2759 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2765 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2766 if (ext_adv_capable(hdev))
2773 hci_dev_unlock(hdev);
2780 struct hci_dev *hdev = container_of(work, struct hci_dev,
2784 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2785 mgmt_set_discoverable_complete(hdev, status);
2819 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2857 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2868 hci_req_init(&req, conn->hdev);
2874 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2883 hci_dev_lock(req->hdev);
2885 hci_dev_unlock(req->hdev);
2891 struct hci_dev *hdev = container_of(work, struct hci_dev,
2897 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2901 hci_dev_lock(hdev);
2903 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2907 hci_dev_unlock(hdev);
2923 BT_DBG("%s", req->hdev->name);
2925 hci_dev_lock(req->hdev);
2926 hci_inquiry_cache_flush(req->hdev);
2927 hci_dev_unlock(req->hdev);
2931 if (req->hdev->discovery.limited)
2945 struct hci_dev *hdev = container_of(work, struct hci_dev,
2949 BT_DBG("%s", hdev->name);
2951 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2954 cancel_delayed_work(&hdev->le_scan_restart);
2956 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2958 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2963 hdev->discovery.scan_start = 0;
2973 if (hdev->discovery.type == DISCOV_TYPE_LE)
2976 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2979 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2980 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2981 hdev->discovery.state != DISCOVERY_RESOLVING)
2987 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2990 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2997 hci_dev_lock(hdev);
2998 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2999 hci_dev_unlock(hdev);
3004 struct hci_dev *hdev = req->hdev;
3007 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3010 if (hdev->scanning_paused) {
3011 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3017 if (use_ext_scan(hdev)) {
3040 struct hci_dev *hdev = container_of(work, struct hci_dev,
3045 BT_DBG("%s", hdev->name);
3047 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3049 bt_dev_err(hdev, "failed to restart LE scan: status %d",
3054 hci_dev_lock(hdev);
3056 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3057 !hdev->discovery.scan_start)
3060 /* When the scan was started, hdev->le_scan_disable has been queued
3065 duration = hdev->discovery.scan_duration;
3066 scan_start = hdev->discovery.scan_start;
3081 queue_delayed_work(hdev->req_workqueue,
3082 &hdev->le_scan_disable, timeout);
3085 hci_dev_unlock(hdev);
3091 struct hci_dev *hdev = req->hdev;
3099 BT_DBG("%s", hdev->name);
3105 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3112 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3118 hdev->le_scan_window_discovery, own_addr_type,
3127 BT_DBG("%s", req->hdev->name);
3136 static void start_discovery(struct hci_dev *hdev, u8 *status)
3140 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
3142 switch (hdev->discovery.type) {
3144 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3145 hci_req_sync(hdev, bredr_inquiry,
3159 &hdev->quirks)) {
3165 hci_req_sync(hdev, interleaved_discov,
3166 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3171 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3172 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3177 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3188 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
3195 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3196 hdev->discovery.result_filtering) {
3197 hdev->discovery.scan_start = jiffies;
3198 hdev->discovery.scan_duration = timeout;
3201 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3207 struct hci_dev *hdev = req->hdev;
3208 struct discovery_state *d = &hdev->discovery;
3213 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
3216 if (test_bit(HCI_INQUIRY, &hdev->flags))
3219 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3220 cancel_delayed_work(&hdev->le_scan_disable);
3227 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3238 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3254 hci_dev_lock(req->hdev);
3256 hci_dev_unlock(req->hdev);
3263 struct hci_dev *hdev = container_of(work, struct hci_dev,
3267 switch (hdev->discovery.state) {
3269 start_discovery(hdev, &status);
3270 mgmt_start_discovery_complete(hdev, status);
3272 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3274 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3277 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3278 mgmt_stop_discovery_complete(hdev, status);
3280 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3290 struct hci_dev *hdev = container_of(work, struct hci_dev,
3293 BT_DBG("%s", hdev->name);
3295 hci_dev_lock(hdev);
3302 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3303 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3304 hdev->discov_timeout = 0;
3306 hci_dev_unlock(hdev);
3308 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3309 mgmt_new_settings(hdev);
3314 struct hci_dev *hdev = req->hdev;
3317 hci_dev_lock(hdev);
3319 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3320 !lmp_host_ssp_capable(hdev)) {
3325 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3333 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3334 lmp_bredr_capable(hdev)) {
3343 if (cp.le != lmp_host_le_capable(hdev) ||
3344 cp.simul != lmp_host_le_br_capable(hdev))
3349 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3354 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3355 list_empty(&hdev->adv_instances)) {
3358 if (ext_adv_capable(hdev)) {
3370 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3371 if (!ext_adv_capable(hdev))
3377 } else if (!list_empty(&hdev->adv_instances)) {
3380 adv_instance = list_first_entry(&hdev->adv_instances,
3388 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3389 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3393 if (lmp_bredr_capable(hdev)) {
3394 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3404 hci_dev_unlock(hdev);
3408 int __hci_req_hci_power_on(struct hci_dev *hdev)
3415 smp_register(hdev);
3417 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3421 void hci_request_setup(struct hci_dev *hdev)
3423 INIT_WORK(&hdev->discov_update, discov_update);
3424 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3425 INIT_WORK(&hdev->scan_update, scan_update_work);
3426 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3427 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3428 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3429 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3430 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3431 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3432 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3435 void hci_request_cancel_all(struct hci_dev *hdev)
3437 hci_req_sync_cancel(hdev, ENODEV);
3439 cancel_work_sync(&hdev->discov_update);
3440 cancel_work_sync(&hdev->bg_scan_update);
3441 cancel_work_sync(&hdev->scan_update);
3442 cancel_work_sync(&hdev->connectable_update);
3443 cancel_work_sync(&hdev->discoverable_update);
3444 cancel_delayed_work_sync(&hdev->discov_off);
3445 cancel_delayed_work_sync(&hdev->le_scan_disable);
3446 cancel_delayed_work_sync(&hdev->le_scan_restart);
3448 if (hdev->adv_instance_timeout) {
3449 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3450 hdev->adv_instance_timeout = 0;
3453 cancel_interleave_scan(hdev);