/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive the Inquiry Complete event right
	 * before the Inquiry Cancel Command Complete event, in which case
	 * the latter should have a status of Command Disallowed (0x0c).
	 * This should not be treated as an error, since we actually achieve
	 * what Inquiry Cancel wants to achieve, which is to end the last
	 * Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

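/* HCI_Reset returns the controller to its power-on defaults, so clear
 * every cached value and non-persistent flag that the reset also wipes
 * on the controller side.
 */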
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

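/* The reply to Read_Stored_Link_Key only carries meaningful key counts
 * when the command was issued with Read_All_Flag set, so the values are
 * cached only in that case.
 */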
static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

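/* Translate the LMP feature bits reported by the controller into the
 * ACL/SCO/eSCO packet types the host is allowed to use on this device.
 */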
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to the features
	 * supported by the device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

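/* Cache the controller's ACL/SCO buffer geometry; the packet counts seed
 * the flow-control counters (acl_cnt/sco_cnt). Some controllers report a
 * bogus SCO buffer size, hence the quirk override.
 */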
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

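/* Read_Clock completes with either the local native clock (which == 0x00)
 * or the piconet clock of the connection identified by the handle echoed
 * in the reply.
 */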
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->err_data_reporting = rp->err_data_reporting;
}

static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return;

	hdev->err_data_reporting = cp->err_data_reporting;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

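/* Cache the LE ACL buffer parameters; le_cnt mirrors le_pkts and acts as
 * the flow-control credit counter for outgoing LE data.
 */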
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

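/* Track the random address programmed per advertising set: instance 0 is
 * cached on hdev itself, other instances on their adv_info entry.
 */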
static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv_instance;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 (Set adv and Directed advs) */
		bacpy(&hdev->random_addr, &cp->bdaddr);
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&adv_instance->random_addr, &cp->bdaddr);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}

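/* During active LE scanning an advertising report is not forwarded to
 * userspace immediately: it is parked here until the matching scan
 * response (or the end of scanning) merges both into a single
 * mgmt_device_found() event.
 */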
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

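/* Common completion logic for the legacy and extended scan-enable
 * commands; "enable" carries the LE_Scan_Enable parameter echoed from
 * the issued command.
 */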
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}

static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_accept_list_size = rp->size;
}

static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
}

static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}

static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

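/* Unlike the legacy variant, Set_Extended_Advertising_Parameters also
 * returns the TX power actually selected by the controller, so cache it
 * (per advertising instance) and refresh the advertising data, which may
 * embed the TX power level.
 */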
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}

static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
		set_bit(HCI_INQUIRY, &hdev->flags);
}

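/* Command-status handler for HCI_Create_Connection. A Command Disallowed
 * (0x0c) failure on an early attempt parks the connection in BT_CONNECT2
 * for a retry instead of tearing it down; on success a conn object is
 * created if none was pending.
 */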
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

1968 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1969 struct hci_conn *conn)
1970 {
1971 if (conn->state != BT_CONFIG || !conn->out)
1972 return 0;
1973
1974 if (conn->pending_sec_level == BT_SECURITY_SDP)
1975 return 0;
1976
1977 /* Only request authentication for SSP connections or non-SSP
1978 * devices with sec_level MEDIUM or HIGH or if MITM protection
1979 * is requested.
1980 */
1981 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1982 conn->pending_sec_level != BT_SECURITY_FIPS &&
1983 conn->pending_sec_level != BT_SECURITY_HIGH &&
1984 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1985 return 0;
1986
1987 return 1;
1988 }
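/* Worked example (annotation, not in the original source): an outgoing
 * link in BT_CONFIG to a non-SSP remote with auth_type 0x04 (general
 * bonding, MITM bit clear) returns 0 while pending_sec_level is
 * BT_SECURITY_LOW, but returns 1 once the pending level is raised to
 * MEDIUM, HIGH or FIPS, or once the MITM bit (0x01) is set in
 * auth_type.
 */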
1989
1990 static int hci_resolve_name(struct hci_dev *hdev,
1991 struct inquiry_entry *e)
1992 {
1993 struct hci_cp_remote_name_req cp;
1994
1995 memset(&cp, 0, sizeof(cp));
1996
1997 bacpy(&cp.bdaddr, &e->data.bdaddr);
1998 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1999 cp.pscan_mode = e->data.pscan_mode;
2000 cp.clock_offset = e->data.clock_offset;
2001
2002 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2003 }
2004
2005 static bool hci_resolve_next_name(struct hci_dev *hdev)
2006 {
2007 struct discovery_state *discov = &hdev->discovery;
2008 struct inquiry_entry *e;
2009
2010 if (list_empty(&discov->resolve))
2011 return false;
2012
2013 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2014 if (!e)
2015 return false;
2016
2017 if (hci_resolve_name(hdev, e) == 0) {
2018 e->name_state = NAME_PENDING;
2019 return true;
2020 }
2021
2022 return false;
2023 }
2024
2025 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2026 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2027 {
2028 struct discovery_state *discov = &hdev->discovery;
2029 struct inquiry_entry *e;
2030
2031 /* Update the mgmt connected state if necessary. Be careful,
2032 * however, with conn objects that exist but are not (yet)
2033 * connected: only those in the BT_CONFIG or BT_CONNECTED states
2034 * can be considered connected.
2035 */
2036 if (conn &&
2037 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2038 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2039 mgmt_device_connected(hdev, conn, 0, name, name_len);
2040
2041 if (discov->state == DISCOVERY_STOPPED)
2042 return;
2043
2044 if (discov->state == DISCOVERY_STOPPING)
2045 goto discov_complete;
2046
2047 if (discov->state != DISCOVERY_RESOLVING)
2048 return;
2049
2050 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2051 /* If the device was not found in the list of devices whose names
2052 * are pending, there is no need to continue resolving the next name,
2053 * as that will be done upon receiving another Remote Name Request
2054 * Complete event */
2055 if (!e)
2056 return;
2057
2058 list_del(&e->list);
2059 if (name) {
2060 e->name_state = NAME_KNOWN;
2061 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2062 e->data.rssi, name, name_len);
2063 } else {
2064 e->name_state = NAME_NOT_KNOWN;
2065 }
2066
2067 if (hci_resolve_next_name(hdev))
2068 return;
2069
2070 discov_complete:
2071 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2072 }
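/* Taken together, hci_resolve_name(), hci_resolve_next_name() and
 * hci_check_pending_name() form a simple chain: each Remote Name
 * Request Complete event resolves one NAME_PENDING entry, then the
 * next NAME_NEEDED entry is kicked off, until the resolve list is
 * empty and discovery moves to DISCOVERY_STOPPED.
 */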
2073
2074 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2075 {
2076 struct hci_cp_remote_name_req *cp;
2077 struct hci_conn *conn;
2078
2079 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2080
2081 /* If successful, wait for the name req complete event before
2082 * checking whether authentication is needed */
2083 if (!status)
2084 return;
2085
2086 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2087 if (!cp)
2088 return;
2089
2090 hci_dev_lock(hdev);
2091
2092 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2093
2094 if (hci_dev_test_flag(hdev, HCI_MGMT))
2095 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2096
2097 if (!conn)
2098 goto unlock;
2099
2100 if (!hci_outgoing_auth_needed(hdev, conn))
2101 goto unlock;
2102
2103 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2104 struct hci_cp_auth_requested auth_cp;
2105
2106 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2107
2108 auth_cp.handle = __cpu_to_le16(conn->handle);
2109 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2110 sizeof(auth_cp), &auth_cp);
2111 }
2112
2113 unlock:
2114 hci_dev_unlock(hdev);
2115 }
2116
2117 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2118 {
2119 struct hci_cp_read_remote_features *cp;
2120 struct hci_conn *conn;
2121
2122 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2123
2124 if (!status)
2125 return;
2126
2127 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2128 if (!cp)
2129 return;
2130
2131 hci_dev_lock(hdev);
2132
2133 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2134 if (conn) {
2135 if (conn->state == BT_CONFIG) {
2136 hci_connect_cfm(conn, status);
2137 hci_conn_drop(conn);
2138 }
2139 }
2140
2141 hci_dev_unlock(hdev);
2142 }
2143
2144 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2145 {
2146 struct hci_cp_read_remote_ext_features *cp;
2147 struct hci_conn *conn;
2148
2149 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2150
2151 if (!status)
2152 return;
2153
2154 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2155 if (!cp)
2156 return;
2157
2158 hci_dev_lock(hdev);
2159
2160 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2161 if (conn) {
2162 if (conn->state == BT_CONFIG) {
2163 hci_connect_cfm(conn, status);
2164 hci_conn_drop(conn);
2165 }
2166 }
2167
2168 hci_dev_unlock(hdev);
2169 }
2170
2171 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2172 {
2173 struct hci_cp_setup_sync_conn *cp;
2174 struct hci_conn *acl, *sco;
2175 __u16 handle;
2176
2177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2178
2179 if (!status)
2180 return;
2181
2182 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2183 if (!cp)
2184 return;
2185
2186 handle = __le16_to_cpu(cp->handle);
2187
2188 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2189
2190 hci_dev_lock(hdev);
2191
2192 acl = hci_conn_hash_lookup_handle(hdev, handle);
2193 if (acl) {
2194 sco = acl->link;
2195 if (sco) {
2196 sco->state = BT_CLOSED;
2197
2198 hci_connect_cfm(sco, status);
2199 hci_conn_del(sco);
2200 }
2201 }
2202
2203 hci_dev_unlock(hdev);
2204 }
2205
2206 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2207 {
2208 struct hci_cp_sniff_mode *cp;
2209 struct hci_conn *conn;
2210
2211 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2212
2213 if (!status)
2214 return;
2215
2216 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2217 if (!cp)
2218 return;
2219
2220 hci_dev_lock(hdev);
2221
2222 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2223 if (conn) {
2224 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2225
2226 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2227 hci_sco_setup(conn, status);
2228 }
2229
2230 hci_dev_unlock(hdev);
2231 }
2232
2233 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2234 {
2235 struct hci_cp_exit_sniff_mode *cp;
2236 struct hci_conn *conn;
2237
2238 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2239
2240 if (!status)
2241 return;
2242
2243 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2244 if (!cp)
2245 return;
2246
2247 hci_dev_lock(hdev);
2248
2249 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2250 if (conn) {
2251 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2252
2253 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2254 hci_sco_setup(conn, status);
2255 }
2256
2257 hci_dev_unlock(hdev);
2258 }
2259
2260 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2261 {
2262 struct hci_cp_disconnect *cp;
2263 struct hci_conn *conn;
2264
2265 if (!status)
2266 return;
2267
2268 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2269 if (!cp)
2270 return;
2271
2272 hci_dev_lock(hdev);
2273
2274 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2275 if (conn) {
2276 u8 type = conn->type;
2277
2278 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2279 conn->dst_type, status);
2280
2281 /* If the disconnection failed for any reason, the upper layer
2282 * does not retry to disconnect in current implementation.
2283 * Hence, we need to do some basic cleanup here and re-enable
2284 * advertising if necessary.
2285 */
2286 hci_conn_del(conn);
2287 if (type == LE_LINK)
2288 hci_req_reenable_advertising(hdev);
2289 }
2290
2291 hci_dev_unlock(hdev);
2292 }
2293
2294 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2295 u8 peer_addr_type, u8 own_address_type,
2296 u8 filter_policy)
2297 {
2298 struct hci_conn *conn;
2299
2300 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2301 peer_addr_type);
2302 if (!conn)
2303 return;
2304
2305 /* When using controller-based address resolution, the new
2306 * address types 0x02 and 0x03 are used. These types need to be
2307 * converted back into either the public or the random address type.
2308 */
2309 if (use_ll_privacy(hdev) &&
2310 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2311 switch (own_address_type) {
2312 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2313 own_address_type = ADDR_LE_DEV_PUBLIC;
2314 break;
2315 case ADDR_LE_DEV_RANDOM_RESOLVED:
2316 own_address_type = ADDR_LE_DEV_RANDOM;
2317 break;
2318 }
2319 }
2320
2321 /* Store the initiator and responder address information which
2322 * is needed for SMP. These values will not change during the
2323 * lifetime of the connection.
2324 */
2325 conn->init_addr_type = own_address_type;
2326 if (own_address_type == ADDR_LE_DEV_RANDOM)
2327 bacpy(&conn->init_addr, &hdev->random_addr);
2328 else
2329 bacpy(&conn->init_addr, &hdev->bdaddr);
2330
2331 conn->resp_addr_type = peer_addr_type;
2332 bacpy(&conn->resp_addr, peer_addr);
2333
2334 /* We don't want the connection attempt to stick around
2335 * indefinitely since LE doesn't have a page timeout concept
2336 * like BR/EDR. Set a timer for any connection that doesn't use
2337 * the accept list for connecting.
2338 */
2339 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2340 queue_delayed_work(conn->hdev->workqueue,
2341 &conn->le_conn_timeout,
2342 conn->conn_timeout);
2343 }
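/* A minimal sketch (annotation only, not a kernel API) of the
 * resolved-address-type mapping performed above, factored out as a
 * standalone helper; the function name is illustrative.
 */
static inline u8 example_unresolve_addr_type(u8 own_address_type)
{
	switch (own_address_type) {
	case ADDR_LE_DEV_PUBLIC_RESOLVED:	/* 0x02 */
		return ADDR_LE_DEV_PUBLIC;	/* 0x00 */
	case ADDR_LE_DEV_RANDOM_RESOLVED:	/* 0x03 */
		return ADDR_LE_DEV_RANDOM;	/* 0x01 */
	default:
		return own_address_type;	/* already unresolved */
	}
}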
2344
2345 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2346 {
2347 struct hci_cp_le_create_conn *cp;
2348
2349 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2350
2351 /* All connection failure handling is taken care of by the
2352 * hci_le_conn_failed function which is triggered by the HCI
2353 * request completion callbacks used for connecting.
2354 */
2355 if (status)
2356 return;
2357
2358 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2359 if (!cp)
2360 return;
2361
2362 hci_dev_lock(hdev);
2363
2364 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2365 cp->own_address_type, cp->filter_policy);
2366
2367 hci_dev_unlock(hdev);
2368 }
2369
2370 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2371 {
2372 struct hci_cp_le_ext_create_conn *cp;
2373
2374 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2375
2376 /* All connection failure handling is taken care of by the
2377 * hci_le_conn_failed function which is triggered by the HCI
2378 * request completion callbacks used for connecting.
2379 */
2380 if (status)
2381 return;
2382
2383 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2384 if (!cp)
2385 return;
2386
2387 hci_dev_lock(hdev);
2388
2389 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2390 cp->own_addr_type, cp->filter_policy);
2391
2392 hci_dev_unlock(hdev);
2393 }
2394
2395 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2396 {
2397 struct hci_cp_le_read_remote_features *cp;
2398 struct hci_conn *conn;
2399
2400 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2401
2402 if (!status)
2403 return;
2404
2405 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2406 if (!cp)
2407 return;
2408
2409 hci_dev_lock(hdev);
2410
2411 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2412 if (conn) {
2413 if (conn->state == BT_CONFIG) {
2414 hci_connect_cfm(conn, status);
2415 hci_conn_drop(conn);
2416 }
2417 }
2418
2419 hci_dev_unlock(hdev);
2420 }
2421
2422 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2423 {
2424 struct hci_cp_le_start_enc *cp;
2425 struct hci_conn *conn;
2426
2427 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2428
2429 if (!status)
2430 return;
2431
2432 hci_dev_lock(hdev);
2433
2434 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2435 if (!cp)
2436 goto unlock;
2437
2438 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2439 if (!conn)
2440 goto unlock;
2441
2442 if (conn->state != BT_CONNECTED)
2443 goto unlock;
2444
2445 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2446 hci_conn_drop(conn);
2447
2448 unlock:
2449 hci_dev_unlock(hdev);
2450 }
2451
2452 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2453 {
2454 struct hci_cp_switch_role *cp;
2455 struct hci_conn *conn;
2456
2457 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2458
2459 if (!status)
2460 return;
2461
2462 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2463 if (!cp)
2464 return;
2465
2466 hci_dev_lock(hdev);
2467
2468 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2469 if (conn)
2470 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2471
2472 hci_dev_unlock(hdev);
2473 }
2474
2475 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2476 {
2477 __u8 status = *((__u8 *) skb->data);
2478 struct discovery_state *discov = &hdev->discovery;
2479 struct inquiry_entry *e;
2480
2481 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2482
2483 hci_conn_check_pending(hdev);
2484
2485 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2486 return;
2487
2488 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2489 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2490
2491 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2492 return;
2493
2494 hci_dev_lock(hdev);
2495
2496 if (discov->state != DISCOVERY_FINDING)
2497 goto unlock;
2498
2499 if (list_empty(&discov->resolve)) {
2500 /* When BR/EDR inquiry is active and no LE scanning is in
2501 * progress, then change discovery state to indicate completion.
2502 *
2503 * When running LE scanning and BR/EDR inquiry simultaneously
2504 * and the LE scan already finished, then change the discovery
2505 * state to indicate completion.
2506 */
2507 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2508 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2509 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2510 goto unlock;
2511 }
2512
2513 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2514 if (e && hci_resolve_name(hdev, e) == 0) {
2515 e->name_state = NAME_PENDING;
2516 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2517 } else {
2518 /* When BR/EDR inquiry is active and no LE scanning is in
2519 * progress, then change discovery state to indicate completion.
2520 *
2521 * When running LE scanning and BR/EDR inquiry simultaneously
2522 * and the LE scan already finished, then change the discovery
2523 * state to indicate completion.
2524 */
2525 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2526 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2527 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2528 }
2529
2530 unlock:
2531 hci_dev_unlock(hdev);
2532 }
2533
2534 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2535 {
2536 struct inquiry_data data;
2537 struct inquiry_info *info = (void *) (skb->data + 1);
2538 int num_rsp = *((__u8 *) skb->data);
2539
2540 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2541
2542 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2543 return;
2544
2545 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2546 return;
2547
2548 hci_dev_lock(hdev);
2549
2550 for (; num_rsp; num_rsp--, info++) {
2551 u32 flags;
2552
2553 bacpy(&data.bdaddr, &info->bdaddr);
2554 data.pscan_rep_mode = info->pscan_rep_mode;
2555 data.pscan_period_mode = info->pscan_period_mode;
2556 data.pscan_mode = info->pscan_mode;
2557 memcpy(data.dev_class, info->dev_class, 3);
2558 data.clock_offset = info->clock_offset;
2559 data.rssi = HCI_RSSI_INVALID;
2560 data.ssp_mode = 0x00;
2561
2562 flags = hci_inquiry_cache_update(hdev, &data, false);
2563
2564 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2565 info->dev_class, HCI_RSSI_INVALID,
2566 flags, NULL, 0, NULL, 0);
2567 }
2568
2569 hci_dev_unlock(hdev);
2570 }
2571
2572 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2573 {
2574 struct hci_ev_conn_complete *ev = (void *) skb->data;
2575 struct hci_conn *conn;
2576
2577 BT_DBG("%s", hdev->name);
2578
2579 hci_dev_lock(hdev);
2580
2581 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2582 if (!conn) {
2583 /* A connection may not exist if it was auto-connected. Check
2584 * the BR/EDR accept list to see if this device is allowed to
2585 * auto connect; if the link is of ACL type, create the
2586 * connection automatically.
2587 *
2588 * Auto-connect will only occur if the event filter is
2589 * programmed with a given address. Right now, the event filter
2590 * is only used during suspend.
2591 */
2592 if (ev->link_type == ACL_LINK &&
2593 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2594 &ev->bdaddr,
2595 BDADDR_BREDR)) {
2596 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2597 HCI_ROLE_SLAVE);
2598 if (!conn) {
2599 bt_dev_err(hdev, "no memory for new conn");
2600 goto unlock;
2601 }
2602 } else {
2603 if (ev->link_type != SCO_LINK)
2604 goto unlock;
2605
2606 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2607 &ev->bdaddr);
2608 if (!conn)
2609 goto unlock;
2610
2611 conn->type = SCO_LINK;
2612 }
2613 }
2614
2615 if (!ev->status) {
2616 conn->handle = __le16_to_cpu(ev->handle);
2617
2618 if (conn->type == ACL_LINK) {
2619 conn->state = BT_CONFIG;
2620 hci_conn_hold(conn);
2621
2622 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2623 !hci_find_link_key(hdev, &ev->bdaddr))
2624 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2625 else
2626 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2627 } else
2628 conn->state = BT_CONNECTED;
2629
2630 hci_debugfs_create_conn(conn);
2631 hci_conn_add_sysfs(conn);
2632
2633 if (test_bit(HCI_AUTH, &hdev->flags))
2634 set_bit(HCI_CONN_AUTH, &conn->flags);
2635
2636 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2637 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2638
2639 /* Get remote features */
2640 if (conn->type == ACL_LINK) {
2641 struct hci_cp_read_remote_features cp;
2642 cp.handle = ev->handle;
2643 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2644 sizeof(cp), &cp);
2645
2646 hci_req_update_scan(hdev);
2647 }
2648
2649 /* Set packet type for incoming connection */
2650 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2651 struct hci_cp_change_conn_ptype cp;
2652 cp.handle = ev->handle;
2653 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2654 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2655 &cp);
2656 }
2657 } else {
2658 conn->state = BT_CLOSED;
2659 if (conn->type == ACL_LINK)
2660 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2661 conn->dst_type, ev->status);
2662 }
2663
2664 if (conn->type == ACL_LINK)
2665 hci_sco_setup(conn, ev->status);
2666
2667 if (ev->status) {
2668 hci_connect_cfm(conn, ev->status);
2669 hci_conn_del(conn);
2670 } else if (ev->link_type == SCO_LINK) {
2671 switch (conn->setting & SCO_AIRMODE_MASK) {
2672 case SCO_AIRMODE_CVSD:
2673 if (hdev->notify)
2674 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2675 break;
2676 }
2677
2678 hci_connect_cfm(conn, ev->status);
2679 }
2680
2681 unlock:
2682 hci_dev_unlock(hdev);
2683
2684 hci_conn_check_pending(hdev);
2685 }
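/* Note on the disc_timeout choice above (annotation, not in the
 * original source): an incoming, non-SSP ACL link with no stored link
 * key is about to go through legacy PIN pairing, so it gets the longer
 * HCI_PAIRING_TIMEOUT to leave time for PIN entry; every other ACL
 * link keeps the regular, shorter HCI_DISCONN_TIMEOUT.
 */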
2686
2687 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2688 {
2689 struct hci_cp_reject_conn_req cp;
2690
2691 bacpy(&cp.bdaddr, bdaddr);
2692 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2693 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2694 }
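/* Every rejection path below funnels through this helper, which always
 * reports HCI_ERROR_REJ_BAD_ADDR as the reason, whether the peer was
 * blocked, not connectable, or spoofing our own address.
 */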
2695
2696 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2697 {
2698 struct hci_ev_conn_request *ev = (void *) skb->data;
2699 int mask = hdev->link_mode;
2700 struct inquiry_entry *ie;
2701 struct hci_conn *conn;
2702 __u8 flags = 0;
2703
2704 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2705 ev->link_type);
2706
2707 /* Reject an incoming connection from a device with the same
2708 * BD_ADDR, to guard against CVE-2020-26555.
2709 */
2710 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
2711 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
2712 &ev->bdaddr);
2713 hci_reject_conn(hdev, &ev->bdaddr);
2714 return;
2715 }
2716
2717 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2718 &flags);
2719
2720 if (!(mask & HCI_LM_ACCEPT)) {
2721 hci_reject_conn(hdev, &ev->bdaddr);
2722 return;
2723 }
2724
2725 hci_dev_lock(hdev);
2726
2727 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2728 BDADDR_BREDR)) {
2729 hci_reject_conn(hdev, &ev->bdaddr);
2730 goto unlock;
2731 }
2732
2733 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2734 * connection. These features are only touched through mgmt so
2735 * only do the checks if HCI_MGMT is set.
2736 */
2737 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2738 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2739 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2740 BDADDR_BREDR)) {
2741 hci_reject_conn(hdev, &ev->bdaddr);
2742 goto unlock;
2743 }
2744
2745 /* Connection accepted */
2746
2747 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2748 if (ie)
2749 memcpy(ie->data.dev_class, ev->dev_class, 3);
2750
2751 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2752 &ev->bdaddr);
2753 if (!conn) {
2754 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2755 HCI_ROLE_SLAVE);
2756 if (!conn) {
2757 bt_dev_err(hdev, "no memory for new connection");
2758 goto unlock;
2759 }
2760 }
2761
2762 memcpy(conn->dev_class, ev->dev_class, 3);
2763
2764 hci_dev_unlock(hdev);
2765
2766 if (ev->link_type == ACL_LINK ||
2767 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2768 struct hci_cp_accept_conn_req cp;
2769 conn->state = BT_CONNECT;
2770
2771 bacpy(&cp.bdaddr, &ev->bdaddr);
2772
2773 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2774 cp.role = 0x00; /* Become central */
2775 else
2776 cp.role = 0x01; /* Remain peripheral */
2777
2778 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2779 } else if (!(flags & HCI_PROTO_DEFER)) {
2780 struct hci_cp_accept_sync_conn_req cp;
2781 conn->state = BT_CONNECT;
2782
2783 bacpy(&cp.bdaddr, &ev->bdaddr);
2784 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2785
2786 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2787 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2788 cp.max_latency = cpu_to_le16(0xffff);
2789 cp.content_format = cpu_to_le16(hdev->voice_setting);
2790 cp.retrans_effort = 0xff;
2791
2792 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2793 &cp);
2794 } else {
2795 conn->state = BT_CONNECT2;
2796 hci_connect_cfm(conn, 0);
2797 }
2798
2799 return;
2800 unlock:
2801 hci_dev_unlock(hdev);
2802 }
2803
2804 static u8 hci_to_mgmt_reason(u8 err)
2805 {
2806 switch (err) {
2807 case HCI_ERROR_CONNECTION_TIMEOUT:
2808 return MGMT_DEV_DISCONN_TIMEOUT;
2809 case HCI_ERROR_REMOTE_USER_TERM:
2810 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2811 case HCI_ERROR_REMOTE_POWER_OFF:
2812 return MGMT_DEV_DISCONN_REMOTE;
2813 case HCI_ERROR_LOCAL_HOST_TERM:
2814 return MGMT_DEV_DISCONN_LOCAL_HOST;
2815 default:
2816 return MGMT_DEV_DISCONN_UNKNOWN;
2817 }
2818 }
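/* Worked example: a link dropped by the peer with
 * HCI_ERROR_REMOTE_POWER_OFF is reported to userspace as
 * MGMT_DEV_DISCONN_REMOTE, while any controller error code not listed
 * above falls back to MGMT_DEV_DISCONN_UNKNOWN.
 */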
2819
2820 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2821 {
2822 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2823 u8 reason;
2824 struct hci_conn_params *params;
2825 struct hci_conn *conn;
2826 bool mgmt_connected;
2827 u8 type;
2828
2829 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2830
2831 hci_dev_lock(hdev);
2832
2833 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2834 if (!conn)
2835 goto unlock;
2836
2837 if (ev->status) {
2838 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2839 conn->dst_type, ev->status);
2840 goto unlock;
2841 }
2842
2843 conn->state = BT_CLOSED;
2844
2845 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2846
2847 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2848 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2849 else
2850 reason = hci_to_mgmt_reason(ev->reason);
2851
2852 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2853 reason, mgmt_connected);
2854
2855 if (conn->type == ACL_LINK) {
2856 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2857 hci_remove_link_key(hdev, &conn->dst);
2858
2859 hci_req_update_scan(hdev);
2860 }
2861
2862 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2863 if (params) {
2864 switch (params->auto_connect) {
2865 case HCI_AUTO_CONN_LINK_LOSS:
2866 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2867 break;
2868 fallthrough;
2869
2870 case HCI_AUTO_CONN_DIRECT:
2871 case HCI_AUTO_CONN_ALWAYS:
2872 list_del_init(¶ms->action);
2873 list_add(¶ms->action, &hdev->pend_le_conns);
2874 hci_update_background_scan(hdev);
2875 break;
2876
2877 default:
2878 break;
2879 }
2880 }
2881
2882 type = conn->type;
2883
2884 hci_disconn_cfm(conn, ev->reason);
2885 hci_conn_del(conn);
2886
2887 /* The suspend notifier is waiting for all devices to disconnect so
2888 * clear the bit from pending tasks and inform the wait queue.
2889 */
2890 if (list_empty(&hdev->conn_hash.list) &&
2891 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2892 wake_up(&hdev->suspend_wait_q);
2893 }
2894
2895 /* Re-enable advertising if necessary, since it might
2896 * have been disabled by the connection. From the
2897 * HCI_LE_Set_Advertise_Enable command description in
2898 * the core specification (v4.0):
2899 * "The Controller shall continue advertising until the Host
2900 * issues an LE_Set_Advertise_Enable command with
2901 * Advertising_Enable set to 0x00 (Advertising is disabled)
2902 * or until a connection is created or until the Advertising
2903 * is timed out due to Directed Advertising."
2904 */
2905 if (type == LE_LINK)
2906 hci_req_reenable_advertising(hdev);
2907
2908 unlock:
2909 hci_dev_unlock(hdev);
2910 }
2911
2912 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2913 {
2914 struct hci_ev_auth_complete *ev = (void *) skb->data;
2915 struct hci_conn *conn;
2916
2917 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2918
2919 hci_dev_lock(hdev);
2920
2921 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2922 if (!conn)
2923 goto unlock;
2924
2925 if (!ev->status) {
2926 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2927 set_bit(HCI_CONN_AUTH, &conn->flags);
2928 conn->sec_level = conn->pending_sec_level;
2929 } else {
2930 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2931 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2932
2933 mgmt_auth_failed(conn, ev->status);
2934 }
2935
2936 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2937
2938 if (conn->state == BT_CONFIG) {
2939 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2940 struct hci_cp_set_conn_encrypt cp;
2941 cp.handle = ev->handle;
2942 cp.encrypt = 0x01;
2943 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2944 &cp);
2945 } else {
2946 conn->state = BT_CONNECTED;
2947 hci_connect_cfm(conn, ev->status);
2948 hci_conn_drop(conn);
2949 }
2950 } else {
2951 hci_auth_cfm(conn, ev->status);
2952
2953 hci_conn_hold(conn);
2954 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2955 hci_conn_drop(conn);
2956 }
2957
2958 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2959 if (!ev->status) {
2960 struct hci_cp_set_conn_encrypt cp;
2961 cp.handle = ev->handle;
2962 cp.encrypt = 0x01;
2963 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2964 &cp);
2965 } else {
2966 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2967 hci_encrypt_cfm(conn, ev->status);
2968 }
2969 }
2970
2971 unlock:
2972 hci_dev_unlock(hdev);
2973 }
2974
2975 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2976 {
2977 struct hci_ev_remote_name *ev = (void *) skb->data;
2978 struct hci_conn *conn;
2979
2980 BT_DBG("%s", hdev->name);
2981
2982 hci_conn_check_pending(hdev);
2983
2984 hci_dev_lock(hdev);
2985
2986 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2987
2988 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2989 goto check_auth;
2990
2991 if (ev->status == 0)
2992 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2993 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2994 else
2995 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2996
2997 check_auth:
2998 if (!conn)
2999 goto unlock;
3000
3001 if (!hci_outgoing_auth_needed(hdev, conn))
3002 goto unlock;
3003
3004 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3005 struct hci_cp_auth_requested cp;
3006
3007 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3008
3009 cp.handle = __cpu_to_le16(conn->handle);
3010 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3011 }
3012
3013 unlock:
3014 hci_dev_unlock(hdev);
3015 }
3016
3017 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3018 u16 opcode, struct sk_buff *skb)
3019 {
3020 const struct hci_rp_read_enc_key_size *rp;
3021 struct hci_conn *conn;
3022 u16 handle;
3023
3024 BT_DBG("%s status 0x%02x", hdev->name, status);
3025
3026 if (!skb || skb->len < sizeof(*rp)) {
3027 bt_dev_err(hdev, "invalid read key size response");
3028 return;
3029 }
3030
3031 rp = (void *)skb->data;
3032 handle = le16_to_cpu(rp->handle);
3033
3034 hci_dev_lock(hdev);
3035
3036 conn = hci_conn_hash_lookup_handle(hdev, handle);
3037 if (!conn)
3038 goto unlock;
3039
3040 /* While unexpected, the read_enc_key_size command may fail. The most
3041 * secure approach is to then assume the key size is 0 to force a
3042 * disconnection.
3043 */
3044 if (rp->status) {
3045 bt_dev_err(hdev, "failed to read key size for handle %u",
3046 handle);
3047 conn->enc_key_size = 0;
3048 } else {
3049 conn->enc_key_size = rp->key_size;
3050 }
3051
3052 hci_encrypt_cfm(conn, 0);
3053
3054 unlock:
3055 hci_dev_unlock(hdev);
3056 }
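/* Forcing enc_key_size to 0 above is deliberate (annotation):
 * hci_encrypt_cfm() propagates the key size to the upper layers, which
 * are expected to treat a zero-length key as unusable and tear the
 * link down rather than run encrypted traffic over a key of unknown
 * strength.
 */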
3057
3058 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3059 {
3060 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3061 struct hci_conn *conn;
3062
3063 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3064
3065 hci_dev_lock(hdev);
3066
3067 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3068 if (!conn)
3069 goto unlock;
3070
3071 if (!ev->status) {
3072 if (ev->encrypt) {
3073 /* Encryption implies authentication */
3074 set_bit(HCI_CONN_AUTH, &conn->flags);
3075 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3076 conn->sec_level = conn->pending_sec_level;
3077
3078 /* P-256 authentication key implies FIPS */
3079 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3080 set_bit(HCI_CONN_FIPS, &conn->flags);
3081
3082 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3083 conn->type == LE_LINK)
3084 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3085 } else {
3086 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3087 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3088 }
3089 }
3090
3091 /* We should disregard the current RPA and generate a new one
3092 * whenever the encryption procedure fails.
3093 */
3094 if (ev->status && conn->type == LE_LINK) {
3095 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3096 hci_adv_instances_set_rpa_expired(hdev, true);
3097 }
3098
3099 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3100
3101 /* Check link security requirements are met */
3102 if (!hci_conn_check_link_mode(conn))
3103 ev->status = HCI_ERROR_AUTH_FAILURE;
3104
3105 if (ev->status && conn->state == BT_CONNECTED) {
3106 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3107 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3108
3109 /* Notify upper layers so they can cleanup before
3110 * disconnecting.
3111 */
3112 hci_encrypt_cfm(conn, ev->status);
3113 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3114 hci_conn_drop(conn);
3115 goto unlock;
3116 }
3117
3118 /* Try reading the encryption key size for encrypted ACL links */
3119 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3120 struct hci_cp_read_enc_key_size cp;
3121 struct hci_request req;
3122
3123 /* Only send HCI_Read_Encryption_Key_Size if the
3124 * controller really supports it. If it doesn't, assume
3125 * the default size (16).
3126 */
3127 if (!(hdev->commands[20] & 0x10)) {
3128 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3129 goto notify;
3130 }
3131
3132 hci_req_init(&req, hdev);
3133
3134 cp.handle = cpu_to_le16(conn->handle);
3135 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3136
3137 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3138 bt_dev_err(hdev, "sending read key size failed");
3139 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3140 goto notify;
3141 }
3142
3143 goto unlock;
3144 }
3145
3146 /* Set the default Authenticated Payload Timeout after
3147 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3148 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3149 * sent when the link is active and Encryption is enabled. The conn
3150 * type can be either LE or ACL, and the controller must support
3151 * LMP Ping. AES-CCM encryption must also be in use.
3152 */
3153 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3154 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3155 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3156 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3157 struct hci_cp_write_auth_payload_to cp;
3158
3159 cp.handle = cpu_to_le16(conn->handle);
3160 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3161 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3162 sizeof(cp), &cp);
3163 }
3164
3165 notify:
3166 hci_encrypt_cfm(conn, ev->status);
3167
3168 unlock:
3169 hci_dev_unlock(hdev);
3170 }
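/* Annotation: reading the encryption key size on every successful
 * BR/EDR encryption change is what lets upper layers reject weak keys
 * and mitigate key-negotiation downgrade attacks (e.g. KNOB); when the
 * controller cannot report it, the code above assumes the full
 * HCI_LINK_KEY_SIZE of 16 bytes.
 */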
3171
3172 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3173 struct sk_buff *skb)
3174 {
3175 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3176 struct hci_conn *conn;
3177
3178 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3179
3180 hci_dev_lock(hdev);
3181
3182 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3183 if (conn) {
3184 if (!ev->status)
3185 set_bit(HCI_CONN_SECURE, &conn->flags);
3186
3187 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3188
3189 hci_key_change_cfm(conn, ev->status);
3190 }
3191
3192 hci_dev_unlock(hdev);
3193 }
3194
3195 static void hci_remote_features_evt(struct hci_dev *hdev,
3196 struct sk_buff *skb)
3197 {
3198 struct hci_ev_remote_features *ev = (void *) skb->data;
3199 struct hci_conn *conn;
3200
3201 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3202
3203 hci_dev_lock(hdev);
3204
3205 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3206 if (!conn)
3207 goto unlock;
3208
3209 if (!ev->status)
3210 memcpy(conn->features[0], ev->features, 8);
3211
3212 if (conn->state != BT_CONFIG)
3213 goto unlock;
3214
3215 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3216 lmp_ext_feat_capable(conn)) {
3217 struct hci_cp_read_remote_ext_features cp;
3218 cp.handle = ev->handle;
3219 cp.page = 0x01;
3220 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3221 sizeof(cp), &cp);
3222 goto unlock;
3223 }
3224
3225 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3226 struct hci_cp_remote_name_req cp;
3227 memset(&cp, 0, sizeof(cp));
3228 bacpy(&cp.bdaddr, &conn->dst);
3229 cp.pscan_rep_mode = 0x02;
3230 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3231 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3232 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3233
3234 if (!hci_outgoing_auth_needed(hdev, conn)) {
3235 conn->state = BT_CONNECTED;
3236 hci_connect_cfm(conn, ev->status);
3237 hci_conn_drop(conn);
3238 }
3239
3240 unlock:
3241 hci_dev_unlock(hdev);
3242 }
3243
3244 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3245 u16 *opcode, u8 *status,
3246 hci_req_complete_t *req_complete,
3247 hci_req_complete_skb_t *req_complete_skb)
3248 {
3249 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3250
3251 *opcode = __le16_to_cpu(ev->opcode);
3252 *status = skb->data[sizeof(*ev)];
3253
3254 skb_pull(skb, sizeof(*ev));
3255
3256 switch (*opcode) {
3257 case HCI_OP_INQUIRY_CANCEL:
3258 hci_cc_inquiry_cancel(hdev, skb, status);
3259 break;
3260
3261 case HCI_OP_PERIODIC_INQ:
3262 hci_cc_periodic_inq(hdev, skb);
3263 break;
3264
3265 case HCI_OP_EXIT_PERIODIC_INQ:
3266 hci_cc_exit_periodic_inq(hdev, skb);
3267 break;
3268
3269 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3270 hci_cc_remote_name_req_cancel(hdev, skb);
3271 break;
3272
3273 case HCI_OP_ROLE_DISCOVERY:
3274 hci_cc_role_discovery(hdev, skb);
3275 break;
3276
3277 case HCI_OP_READ_LINK_POLICY:
3278 hci_cc_read_link_policy(hdev, skb);
3279 break;
3280
3281 case HCI_OP_WRITE_LINK_POLICY:
3282 hci_cc_write_link_policy(hdev, skb);
3283 break;
3284
3285 case HCI_OP_READ_DEF_LINK_POLICY:
3286 hci_cc_read_def_link_policy(hdev, skb);
3287 break;
3288
3289 case HCI_OP_WRITE_DEF_LINK_POLICY:
3290 hci_cc_write_def_link_policy(hdev, skb);
3291 break;
3292
3293 case HCI_OP_RESET:
3294 hci_cc_reset(hdev, skb);
3295 break;
3296
3297 case HCI_OP_READ_STORED_LINK_KEY:
3298 hci_cc_read_stored_link_key(hdev, skb);
3299 break;
3300
3301 case HCI_OP_DELETE_STORED_LINK_KEY:
3302 hci_cc_delete_stored_link_key(hdev, skb);
3303 break;
3304
3305 case HCI_OP_WRITE_LOCAL_NAME:
3306 hci_cc_write_local_name(hdev, skb);
3307 break;
3308
3309 case HCI_OP_READ_LOCAL_NAME:
3310 hci_cc_read_local_name(hdev, skb);
3311 break;
3312
3313 case HCI_OP_WRITE_AUTH_ENABLE:
3314 hci_cc_write_auth_enable(hdev, skb);
3315 break;
3316
3317 case HCI_OP_WRITE_ENCRYPT_MODE:
3318 hci_cc_write_encrypt_mode(hdev, skb);
3319 break;
3320
3321 case HCI_OP_WRITE_SCAN_ENABLE:
3322 hci_cc_write_scan_enable(hdev, skb);
3323 break;
3324
3325 case HCI_OP_READ_CLASS_OF_DEV:
3326 hci_cc_read_class_of_dev(hdev, skb);
3327 break;
3328
3329 case HCI_OP_WRITE_CLASS_OF_DEV:
3330 hci_cc_write_class_of_dev(hdev, skb);
3331 break;
3332
3333 case HCI_OP_READ_VOICE_SETTING:
3334 hci_cc_read_voice_setting(hdev, skb);
3335 break;
3336
3337 case HCI_OP_WRITE_VOICE_SETTING:
3338 hci_cc_write_voice_setting(hdev, skb);
3339 break;
3340
3341 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3342 hci_cc_read_num_supported_iac(hdev, skb);
3343 break;
3344
3345 case HCI_OP_WRITE_SSP_MODE:
3346 hci_cc_write_ssp_mode(hdev, skb);
3347 break;
3348
3349 case HCI_OP_WRITE_SC_SUPPORT:
3350 hci_cc_write_sc_support(hdev, skb);
3351 break;
3352
3353 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3354 hci_cc_read_auth_payload_timeout(hdev, skb);
3355 break;
3356
3357 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3358 hci_cc_write_auth_payload_timeout(hdev, skb);
3359 break;
3360
3361 case HCI_OP_READ_LOCAL_VERSION:
3362 hci_cc_read_local_version(hdev, skb);
3363 break;
3364
3365 case HCI_OP_READ_LOCAL_COMMANDS:
3366 hci_cc_read_local_commands(hdev, skb);
3367 break;
3368
3369 case HCI_OP_READ_LOCAL_FEATURES:
3370 hci_cc_read_local_features(hdev, skb);
3371 break;
3372
3373 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3374 hci_cc_read_local_ext_features(hdev, skb);
3375 break;
3376
3377 case HCI_OP_READ_BUFFER_SIZE:
3378 hci_cc_read_buffer_size(hdev, skb);
3379 break;
3380
3381 case HCI_OP_READ_BD_ADDR:
3382 hci_cc_read_bd_addr(hdev, skb);
3383 break;
3384
3385 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3386 hci_cc_read_local_pairing_opts(hdev, skb);
3387 break;
3388
3389 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3390 hci_cc_read_page_scan_activity(hdev, skb);
3391 break;
3392
3393 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3394 hci_cc_write_page_scan_activity(hdev, skb);
3395 break;
3396
3397 case HCI_OP_READ_PAGE_SCAN_TYPE:
3398 hci_cc_read_page_scan_type(hdev, skb);
3399 break;
3400
3401 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3402 hci_cc_write_page_scan_type(hdev, skb);
3403 break;
3404
3405 case HCI_OP_READ_DATA_BLOCK_SIZE:
3406 hci_cc_read_data_block_size(hdev, skb);
3407 break;
3408
3409 case HCI_OP_READ_FLOW_CONTROL_MODE:
3410 hci_cc_read_flow_control_mode(hdev, skb);
3411 break;
3412
3413 case HCI_OP_READ_LOCAL_AMP_INFO:
3414 hci_cc_read_local_amp_info(hdev, skb);
3415 break;
3416
3417 case HCI_OP_READ_CLOCK:
3418 hci_cc_read_clock(hdev, skb);
3419 break;
3420
3421 case HCI_OP_READ_INQ_RSP_TX_POWER:
3422 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3423 break;
3424
3425 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3426 hci_cc_read_def_err_data_reporting(hdev, skb);
3427 break;
3428
3429 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3430 hci_cc_write_def_err_data_reporting(hdev, skb);
3431 break;
3432
3433 case HCI_OP_PIN_CODE_REPLY:
3434 hci_cc_pin_code_reply(hdev, skb);
3435 break;
3436
3437 case HCI_OP_PIN_CODE_NEG_REPLY:
3438 hci_cc_pin_code_neg_reply(hdev, skb);
3439 break;
3440
3441 case HCI_OP_READ_LOCAL_OOB_DATA:
3442 hci_cc_read_local_oob_data(hdev, skb);
3443 break;
3444
3445 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3446 hci_cc_read_local_oob_ext_data(hdev, skb);
3447 break;
3448
3449 case HCI_OP_LE_READ_BUFFER_SIZE:
3450 hci_cc_le_read_buffer_size(hdev, skb);
3451 break;
3452
3453 case HCI_OP_LE_READ_LOCAL_FEATURES:
3454 hci_cc_le_read_local_features(hdev, skb);
3455 break;
3456
3457 case HCI_OP_LE_READ_ADV_TX_POWER:
3458 hci_cc_le_read_adv_tx_power(hdev, skb);
3459 break;
3460
3461 case HCI_OP_USER_CONFIRM_REPLY:
3462 hci_cc_user_confirm_reply(hdev, skb);
3463 break;
3464
3465 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3466 hci_cc_user_confirm_neg_reply(hdev, skb);
3467 break;
3468
3469 case HCI_OP_USER_PASSKEY_REPLY:
3470 hci_cc_user_passkey_reply(hdev, skb);
3471 break;
3472
3473 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3474 hci_cc_user_passkey_neg_reply(hdev, skb);
3475 break;
3476
3477 case HCI_OP_LE_SET_RANDOM_ADDR:
3478 hci_cc_le_set_random_addr(hdev, skb);
3479 break;
3480
3481 case HCI_OP_LE_SET_ADV_ENABLE:
3482 hci_cc_le_set_adv_enable(hdev, skb);
3483 break;
3484
3485 case HCI_OP_LE_SET_SCAN_PARAM:
3486 hci_cc_le_set_scan_param(hdev, skb);
3487 break;
3488
3489 case HCI_OP_LE_SET_SCAN_ENABLE:
3490 hci_cc_le_set_scan_enable(hdev, skb);
3491 break;
3492
3493 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3494 hci_cc_le_read_accept_list_size(hdev, skb);
3495 break;
3496
3497 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3498 hci_cc_le_clear_accept_list(hdev, skb);
3499 break;
3500
3501 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3502 hci_cc_le_add_to_accept_list(hdev, skb);
3503 break;
3504
3505 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3506 hci_cc_le_del_from_accept_list(hdev, skb);
3507 break;
3508
3509 case HCI_OP_LE_READ_SUPPORTED_STATES:
3510 hci_cc_le_read_supported_states(hdev, skb);
3511 break;
3512
3513 case HCI_OP_LE_READ_DEF_DATA_LEN:
3514 hci_cc_le_read_def_data_len(hdev, skb);
3515 break;
3516
3517 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3518 hci_cc_le_write_def_data_len(hdev, skb);
3519 break;
3520
3521 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3522 hci_cc_le_add_to_resolv_list(hdev, skb);
3523 break;
3524
3525 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3526 hci_cc_le_del_from_resolv_list(hdev, skb);
3527 break;
3528
3529 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3530 hci_cc_le_clear_resolv_list(hdev, skb);
3531 break;
3532
3533 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3534 hci_cc_le_read_resolv_list_size(hdev, skb);
3535 break;
3536
3537 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3538 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3539 break;
3540
3541 case HCI_OP_LE_READ_MAX_DATA_LEN:
3542 hci_cc_le_read_max_data_len(hdev, skb);
3543 break;
3544
3545 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3546 hci_cc_write_le_host_supported(hdev, skb);
3547 break;
3548
3549 case HCI_OP_LE_SET_ADV_PARAM:
3550 hci_cc_set_adv_param(hdev, skb);
3551 break;
3552
3553 case HCI_OP_READ_RSSI:
3554 hci_cc_read_rssi(hdev, skb);
3555 break;
3556
3557 case HCI_OP_READ_TX_POWER:
3558 hci_cc_read_tx_power(hdev, skb);
3559 break;
3560
3561 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3562 hci_cc_write_ssp_debug_mode(hdev, skb);
3563 break;
3564
3565 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3566 hci_cc_le_set_ext_scan_param(hdev, skb);
3567 break;
3568
3569 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3570 hci_cc_le_set_ext_scan_enable(hdev, skb);
3571 break;
3572
3573 case HCI_OP_LE_SET_DEFAULT_PHY:
3574 hci_cc_le_set_default_phy(hdev, skb);
3575 break;
3576
3577 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3578 hci_cc_le_read_num_adv_sets(hdev, skb);
3579 break;
3580
3581 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3582 hci_cc_set_ext_adv_param(hdev, skb);
3583 break;
3584
3585 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3586 hci_cc_le_set_ext_adv_enable(hdev, skb);
3587 break;
3588
3589 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3590 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3591 break;
3592
3593 default:
3594 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3595 break;
3596 }
3597
3598 if (*opcode != HCI_OP_NOP)
3599 cancel_delayed_work(&hdev->cmd_timer);
3600
3601 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3602 atomic_set(&hdev->cmd_cnt, 1);
3603
3604 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3605 req_complete_skb);
3606
3607 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3608 bt_dev_err(hdev,
3609 "unexpected event for opcode 0x%4.4x", *opcode);
3610 return;
3611 }
3612
3613 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3614 queue_work(hdev->workqueue, &hdev->cmd_work);
3615 }
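/* Annotation: cmd_cnt is reset to exactly 1 rather than to ev->ncmd.
 * The core deliberately keeps at most one command in flight, so a
 * controller advertising more credits does not change the pacing; it
 * only confirms that the next queued command may be sent.
 */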
3616
3617 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3618 u16 *opcode, u8 *status,
3619 hci_req_complete_t *req_complete,
3620 hci_req_complete_skb_t *req_complete_skb)
3621 {
3622 struct hci_ev_cmd_status *ev = (void *) skb->data;
3623
3624 skb_pull(skb, sizeof(*ev));
3625
3626 *opcode = __le16_to_cpu(ev->opcode);
3627 *status = ev->status;
3628
3629 switch (*opcode) {
3630 case HCI_OP_INQUIRY:
3631 hci_cs_inquiry(hdev, ev->status);
3632 break;
3633
3634 case HCI_OP_CREATE_CONN:
3635 hci_cs_create_conn(hdev, ev->status);
3636 break;
3637
3638 case HCI_OP_DISCONNECT:
3639 hci_cs_disconnect(hdev, ev->status);
3640 break;
3641
3642 case HCI_OP_ADD_SCO:
3643 hci_cs_add_sco(hdev, ev->status);
3644 break;
3645
3646 case HCI_OP_AUTH_REQUESTED:
3647 hci_cs_auth_requested(hdev, ev->status);
3648 break;
3649
3650 case HCI_OP_SET_CONN_ENCRYPT:
3651 hci_cs_set_conn_encrypt(hdev, ev->status);
3652 break;
3653
3654 case HCI_OP_REMOTE_NAME_REQ:
3655 hci_cs_remote_name_req(hdev, ev->status);
3656 break;
3657
3658 case HCI_OP_READ_REMOTE_FEATURES:
3659 hci_cs_read_remote_features(hdev, ev->status);
3660 break;
3661
3662 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3663 hci_cs_read_remote_ext_features(hdev, ev->status);
3664 break;
3665
3666 case HCI_OP_SETUP_SYNC_CONN:
3667 hci_cs_setup_sync_conn(hdev, ev->status);
3668 break;
3669
3670 case HCI_OP_SNIFF_MODE:
3671 hci_cs_sniff_mode(hdev, ev->status);
3672 break;
3673
3674 case HCI_OP_EXIT_SNIFF_MODE:
3675 hci_cs_exit_sniff_mode(hdev, ev->status);
3676 break;
3677
3678 case HCI_OP_SWITCH_ROLE:
3679 hci_cs_switch_role(hdev, ev->status);
3680 break;
3681
3682 case HCI_OP_LE_CREATE_CONN:
3683 hci_cs_le_create_conn(hdev, ev->status);
3684 break;
3685
3686 case HCI_OP_LE_READ_REMOTE_FEATURES:
3687 hci_cs_le_read_remote_features(hdev, ev->status);
3688 break;
3689
3690 case HCI_OP_LE_START_ENC:
3691 hci_cs_le_start_enc(hdev, ev->status);
3692 break;
3693
3694 case HCI_OP_LE_EXT_CREATE_CONN:
3695 hci_cs_le_ext_create_conn(hdev, ev->status);
3696 break;
3697
3698 default:
3699 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3700 break;
3701 }
3702
3703 if (*opcode != HCI_OP_NOP)
3704 cancel_delayed_work(&hdev->cmd_timer);
3705
3706 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3707 atomic_set(&hdev->cmd_cnt, 1);
3708
3709 /* Indicate request completion if the command failed. Also, if
3710 * we're not waiting for a special event and we get a success
3711 * command status, we should try to flag the request as completed
3712 * (since for this kind of command there will not be a command
3713 * complete event).
3714 */
3715 if (ev->status ||
3716 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3717 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3718 req_complete_skb);
3719
3720 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3721 bt_dev_err(hdev,
3722 "unexpected event for opcode 0x%4.4x", *opcode);
3723 return;
3724 }
3725
3726 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3727 queue_work(hdev->workqueue, &hdev->cmd_work);
3728 }
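/* Unlike Command Complete, Command Status only acknowledges that the
 * controller accepted (or refused) a command whose real result arrives
 * later in a dedicated event; e.g. HCI_OP_CREATE_CONN is answered here
 * first and only finished by the Connection Complete event handled in
 * hci_conn_complete_evt().
 */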
3729
3730 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3731 {
3732 struct hci_ev_hardware_error *ev = (void *) skb->data;
3733
3734 hdev->hw_error_code = ev->code;
3735
3736 queue_work(hdev->req_workqueue, &hdev->error_reset);
3737 }
3738
3739 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3740 {
3741 struct hci_ev_role_change *ev = (void *) skb->data;
3742 struct hci_conn *conn;
3743
3744 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3745
3746 hci_dev_lock(hdev);
3747
3748 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3749 if (conn) {
3750 if (!ev->status)
3751 conn->role = ev->role;
3752
3753 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3754
3755 hci_role_switch_cfm(conn, ev->status, ev->role);
3756 }
3757
3758 hci_dev_unlock(hdev);
3759 }
3760
3761 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3762 {
3763 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3764 int i;
3765
3766 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3767 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3768 return;
3769 }
3770
3771 if (skb->len < sizeof(*ev) ||
3772 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3773 BT_DBG("%s bad parameters", hdev->name);
3774 return;
3775 }
3776
3777 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3778
3779 for (i = 0; i < ev->num_hndl; i++) {
3780 struct hci_comp_pkts_info *info = &ev->handles[i];
3781 struct hci_conn *conn;
3782 __u16 handle, count;
3783
3784 handle = __le16_to_cpu(info->handle);
3785 count = __le16_to_cpu(info->count);
3786
3787 conn = hci_conn_hash_lookup_handle(hdev, handle);
3788 if (!conn)
3789 continue;
3790
3791 conn->sent -= count;
3792
3793 switch (conn->type) {
3794 case ACL_LINK:
3795 hdev->acl_cnt += count;
3796 if (hdev->acl_cnt > hdev->acl_pkts)
3797 hdev->acl_cnt = hdev->acl_pkts;
3798 break;
3799
3800 case LE_LINK:
3801 if (hdev->le_pkts) {
3802 hdev->le_cnt += count;
3803 if (hdev->le_cnt > hdev->le_pkts)
3804 hdev->le_cnt = hdev->le_pkts;
3805 } else {
3806 hdev->acl_cnt += count;
3807 if (hdev->acl_cnt > hdev->acl_pkts)
3808 hdev->acl_cnt = hdev->acl_pkts;
3809 }
3810 break;
3811
3812 case SCO_LINK:
3813 hdev->sco_cnt += count;
3814 if (hdev->sco_cnt > hdev->sco_pkts)
3815 hdev->sco_cnt = hdev->sco_pkts;
3816 break;
3817
3818 default:
3819 bt_dev_err(hdev, "unknown type %d conn %p",
3820 conn->type, conn);
3821 break;
3822 }
3823 }
3824
3825 queue_work(hdev->workqueue, &hdev->tx_work);
3826 }
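/* A minimal sketch (the helper name is illustrative, not a kernel API)
 * of the per-type credit clamp applied in the switch above: returned
 * credits are added back but never allowed to exceed the pool size
 * negotiated at init time.
 */
static inline void example_return_credits(unsigned int *cnt,
					  unsigned int completed,
					  unsigned int max_pkts)
{
	*cnt += completed;	/* controller freed these buffer slots */
	if (*cnt > max_pkts)	/* clamp to the negotiated pool size */
		*cnt = max_pkts;
}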
3827
3828 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3829 __u16 handle)
3830 {
3831 struct hci_chan *chan;
3832
3833 switch (hdev->dev_type) {
3834 case HCI_PRIMARY:
3835 return hci_conn_hash_lookup_handle(hdev, handle);
3836 case HCI_AMP:
3837 chan = hci_chan_lookup_handle(hdev, handle);
3838 if (chan)
3839 return chan->conn;
3840 break;
3841 default:
3842 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3843 break;
3844 }
3845
3846 return NULL;
3847 }
3848
3849 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3850 {
3851 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3852 int i;
3853
3854 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3855 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3856 return;
3857 }
3858
3859 if (skb->len < sizeof(*ev) ||
3860 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3861 BT_DBG("%s bad parameters", hdev->name);
3862 return;
3863 }
3864
3865 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3866 ev->num_hndl);
3867
3868 for (i = 0; i < ev->num_hndl; i++) {
3869 struct hci_comp_blocks_info *info = &ev->handles[i];
3870 struct hci_conn *conn = NULL;
3871 __u16 handle, block_count;
3872
3873 handle = __le16_to_cpu(info->handle);
3874 block_count = __le16_to_cpu(info->blocks);
3875
3876 conn = __hci_conn_lookup_handle(hdev, handle);
3877 if (!conn)
3878 continue;
3879
3880 conn->sent -= block_count;
3881
3882 switch (conn->type) {
3883 case ACL_LINK:
3884 case AMP_LINK:
3885 hdev->block_cnt += block_count;
3886 if (hdev->block_cnt > hdev->num_blocks)
3887 hdev->block_cnt = hdev->num_blocks;
3888 break;
3889
3890 default:
3891 bt_dev_err(hdev, "unknown type %d conn %p",
3892 conn->type, conn);
3893 break;
3894 }
3895 }
3896
3897 queue_work(hdev->workqueue, &hdev->tx_work);
3898 }
3899
3900 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3901 {
3902 struct hci_ev_mode_change *ev = (void *) skb->data;
3903 struct hci_conn *conn;
3904
3905 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3906
3907 hci_dev_lock(hdev);
3908
3909 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3910 if (conn) {
3911 conn->mode = ev->mode;
3912
3913 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3914 &conn->flags)) {
3915 if (conn->mode == HCI_CM_ACTIVE)
3916 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3917 else
3918 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3919 }
3920
3921 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3922 hci_sco_setup(conn, ev->status);
3923 }
3924
3925 hci_dev_unlock(hdev);
3926 }
3927
3928 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3929 {
3930 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3931 struct hci_conn *conn;
3932
3933 BT_DBG("%s", hdev->name);
3934
3935 hci_dev_lock(hdev);
3936
3937 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3938 if (!conn)
3939 goto unlock;
3940
3941 if (conn->state == BT_CONNECTED) {
3942 hci_conn_hold(conn);
3943 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3944 hci_conn_drop(conn);
3945 }
3946
3947 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3948 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3949 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3950 sizeof(ev->bdaddr), &ev->bdaddr);
3951 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3952 u8 secure;
3953
3954 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3955 secure = 1;
3956 else
3957 secure = 0;
3958
3959 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3960 }
3961
3962 unlock:
3963 hci_dev_unlock(hdev);
3964 }
3965
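/* Remember the link key type and PIN length on the connection and
 * derive the pending security level from them: authenticated
 * combination keys map to HIGH (P-192) or FIPS (P-256),
 * unauthenticated ones to MEDIUM, and a legacy combination key
 * with a 16-digit PIN also counts as HIGH.
 */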
3966 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3967 {
3968 if (key_type == HCI_LK_CHANGED_COMBINATION)
3969 return;
3970
3971 conn->pin_length = pin_len;
3972 conn->key_type = key_type;
3973
3974 switch (key_type) {
3975 case HCI_LK_LOCAL_UNIT:
3976 case HCI_LK_REMOTE_UNIT:
3977 case HCI_LK_DEBUG_COMBINATION:
3978 return;
3979 case HCI_LK_COMBINATION:
3980 if (pin_len == 16)
3981 conn->pending_sec_level = BT_SECURITY_HIGH;
3982 else
3983 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3984 break;
3985 case HCI_LK_UNAUTH_COMBINATION_P192:
3986 case HCI_LK_UNAUTH_COMBINATION_P256:
3987 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3988 break;
3989 case HCI_LK_AUTH_COMBINATION_P192:
3990 conn->pending_sec_level = BT_SECURITY_HIGH;
3991 break;
3992 case HCI_LK_AUTH_COMBINATION_P256:
3993 conn->pending_sec_level = BT_SECURITY_FIPS;
3994 break;
3995 }
3996 }
3997
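/* Link Key Request event: reply with a stored key for the peer,
 * unless the key's security properties are too weak for the
 * pending security level, in which case a negative reply forces
 * a new pairing.
 */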
3998 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3999 {
4000 struct hci_ev_link_key_req *ev = (void *) skb->data;
4001 struct hci_cp_link_key_reply cp;
4002 struct hci_conn *conn;
4003 struct link_key *key;
4004
4005 BT_DBG("%s", hdev->name);
4006
4007 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4008 return;
4009
4010 hci_dev_lock(hdev);
4011
4012 key = hci_find_link_key(hdev, &ev->bdaddr);
4013 if (!key) {
4014 BT_DBG("%s link key not found for %pMR", hdev->name,
4015 &ev->bdaddr);
4016 goto not_found;
4017 }
4018
4019 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4020 &ev->bdaddr);
4021
4022 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4023 if (conn) {
4024 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4025
4026 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4027 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4028 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4029 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4030 goto not_found;
4031 }
4032
4033 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4034 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4035 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4036 BT_DBG("%s ignoring key unauthenticated for high security",
4037 hdev->name);
4038 goto not_found;
4039 }
4040
4041 conn_set_key(conn, key->type, key->pin_len);
4042 }
4043
4044 bacpy(&cp.bdaddr, &ev->bdaddr);
4045 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4046
4047 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4048
4049 hci_dev_unlock(hdev);
4050
4051 return;
4052
4053 not_found:
4054 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4055 hci_dev_unlock(hdev);
4056 }
4057
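/* Link Key Notification event: store the newly generated link key.
 * An all-zero key is rejected and the connection dropped
 * (CVE-2020-26555); non-persistent keys are flagged so they get
 * flushed when the connection goes away.
 */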
4058 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4059 {
4060 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4061 struct hci_conn *conn;
4062 struct link_key *key;
4063 bool persistent;
4064 u8 pin_len = 0;
4065
4066 BT_DBG("%s", hdev->name);
4067
4068 hci_dev_lock(hdev);
4069
4070 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4071 if (!conn)
4072 goto unlock;
4073
4074 /* Ignore NULL link key against CVE-2020-26555 */
4075 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4076 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4077 &ev->bdaddr);
4078 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4079 hci_conn_drop(conn);
4080 goto unlock;
4081 }
4082
4083 hci_conn_hold(conn);
4084 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4085 hci_conn_drop(conn);
4086
4087 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4088 conn_set_key(conn, ev->key_type, conn->pin_length);
4089
4090 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4091 goto unlock;
4092
4093 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4094 ev->key_type, pin_len, &persistent);
4095 if (!key)
4096 goto unlock;
4097
4098 /* Update connection information since adding the key will have
4099 * fixed up the type in the case of changed combination keys.
4100 */
4101 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4102 conn_set_key(conn, key->type, key->pin_len);
4103
4104 mgmt_new_link_key(hdev, key, persistent);
4105
4106 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4107 * is set. If it's not set simply remove the key from the kernel
4108 * list (we've still notified user space about it but with
4109 * store_hint being 0).
4110 */
4111 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4112 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4113 list_del_rcu(&key->list);
4114 kfree_rcu(key, rcu);
4115 goto unlock;
4116 }
4117
4118 if (persistent)
4119 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4120 else
4121 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4122
4123 unlock:
4124 hci_dev_unlock(hdev);
4125 }
4126
4127 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4128 {
4129 struct hci_ev_clock_offset *ev = (void *) skb->data;
4130 struct hci_conn *conn;
4131
4132 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4133
4134 hci_dev_lock(hdev);
4135
4136 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4137 if (conn && !ev->status) {
4138 struct inquiry_entry *ie;
4139
4140 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4141 if (ie) {
4142 ie->data.clock_offset = ev->clock_offset;
4143 ie->timestamp = jiffies;
4144 }
4145 }
4146
4147 hci_dev_unlock(hdev);
4148 }
4149
4150 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4151 {
4152 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4153 struct hci_conn *conn;
4154
4155 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4156
4157 hci_dev_lock(hdev);
4158
4159 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4160 if (conn && !ev->status)
4161 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4162
4163 hci_dev_unlock(hdev);
4164 }
4165
4166 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4167 {
4168 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4169 struct inquiry_entry *ie;
4170
4171 BT_DBG("%s", hdev->name);
4172
4173 hci_dev_lock(hdev);
4174
4175 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4176 if (ie) {
4177 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4178 ie->timestamp = jiffies;
4179 }
4180
4181 hci_dev_unlock(hdev);
4182 }
4183
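/* Inquiry Result with RSSI arrives in two wire formats, with and
 * without a page scan mode field; the per-response size tells them
 * apart before the entries are added to the inquiry cache and
 * reported as found devices.
 */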
4184 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4185 struct sk_buff *skb)
4186 {
4187 struct inquiry_data data;
4188 int num_rsp = *((__u8 *) skb->data);
4189
4190 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4191
4192 if (!num_rsp)
4193 return;
4194
4195 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4196 return;
4197
4198 hci_dev_lock(hdev);
4199
4200 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4201 struct inquiry_info_with_rssi_and_pscan_mode *info;
4202 info = (void *) (skb->data + 1);
4203
4204 if (skb->len < num_rsp * sizeof(*info) + 1)
4205 goto unlock;
4206
4207 for (; num_rsp; num_rsp--, info++) {
4208 u32 flags;
4209
4210 bacpy(&data.bdaddr, &info->bdaddr);
4211 data.pscan_rep_mode = info->pscan_rep_mode;
4212 data.pscan_period_mode = info->pscan_period_mode;
4213 data.pscan_mode = info->pscan_mode;
4214 memcpy(data.dev_class, info->dev_class, 3);
4215 data.clock_offset = info->clock_offset;
4216 data.rssi = info->rssi;
4217 data.ssp_mode = 0x00;
4218
4219 flags = hci_inquiry_cache_update(hdev, &data, false);
4220
4221 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4222 info->dev_class, info->rssi,
4223 flags, NULL, 0, NULL, 0);
4224 }
4225 } else {
4226 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4227
4228 if (skb->len < num_rsp * sizeof(*info) + 1)
4229 goto unlock;
4230
4231 for (; num_rsp; num_rsp--, info++) {
4232 u32 flags;
4233
4234 bacpy(&data.bdaddr, &info->bdaddr);
4235 data.pscan_rep_mode = info->pscan_rep_mode;
4236 data.pscan_period_mode = info->pscan_period_mode;
4237 data.pscan_mode = 0x00;
4238 memcpy(data.dev_class, info->dev_class, 3);
4239 data.clock_offset = info->clock_offset;
4240 data.rssi = info->rssi;
4241 data.ssp_mode = 0x00;
4242
4243 flags = hci_inquiry_cache_update(hdev, &data, false);
4244
4245 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4246 info->dev_class, info->rssi,
4247 flags, NULL, 0, NULL, 0);
4248 }
4249 }
4250
4251 unlock:
4252 hci_dev_unlock(hdev);
4253 }
4254
4255 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4256 struct sk_buff *skb)
4257 {
4258 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4259 struct hci_conn *conn;
4260
4261 BT_DBG("%s", hdev->name);
4262
4263 hci_dev_lock(hdev);
4264
4265 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4266 if (!conn)
4267 goto unlock;
4268
4269 if (ev->page < HCI_MAX_PAGES)
4270 memcpy(conn->features[ev->page], ev->features, 8);
4271
4272 if (!ev->status && ev->page == 0x01) {
4273 struct inquiry_entry *ie;
4274
4275 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4276 if (ie)
4277 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4278
4279 if (ev->features[0] & LMP_HOST_SSP) {
4280 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4281 } else {
4282 /* It is mandatory by the Bluetooth specification that
4283 * Extended Inquiry Results are only used when Secure
4284 * Simple Pairing is enabled, but some devices violate
4285 * this.
4286 *
4287 * To make these devices work, the internal SSP
4288 * enabled flag needs to be cleared if the remote host
4289 * features do not indicate SSP support */
4290 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4291 }
4292
4293 if (ev->features[0] & LMP_HOST_SC)
4294 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4295 }
4296
4297 if (conn->state != BT_CONFIG)
4298 goto unlock;
4299
4300 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4301 struct hci_cp_remote_name_req cp;
4302 memset(&cp, 0, sizeof(cp));
4303 bacpy(&cp.bdaddr, &conn->dst);
4304 cp.pscan_rep_mode = 0x02;
4305 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4306 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4307 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4308
4309 if (!hci_outgoing_auth_needed(hdev, conn)) {
4310 conn->state = BT_CONNECTED;
4311 hci_connect_cfm(conn, ev->status);
4312 hci_conn_drop(conn);
4313 }
4314
4315 unlock:
4316 hci_dev_unlock(hdev);
4317 }
4318
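/* Synchronous Connection Complete event: finish SCO/eSCO setup.
 * For a set of known failure codes on an outgoing attempt, the
 * connection is retried with a fallback packet type before the
 * link is given up as closed.
 */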
4319 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4320 struct sk_buff *skb)
4321 {
4322 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4323 struct hci_conn *conn;
4324
4325 switch (ev->link_type) {
4326 case SCO_LINK:
4327 case ESCO_LINK:
4328 break;
4329 default:
4330 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4331 * for HCI_Synchronous_Connection_Complete is limited to
4332 * either SCO or eSCO
4333 */
4334 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4335 return;
4336 }
4337
4338 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4339
4340 hci_dev_lock(hdev);
4341
4342 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4343 if (!conn) {
4344 if (ev->link_type == ESCO_LINK)
4345 goto unlock;
4346
4347 /* When the link type in the event indicates SCO connection
4348 * and lookup of the connection object fails, then check
4349 * if an eSCO connection object exists.
4350 *
4351 * The core limits the synchronous connections to either
4352 * SCO or eSCO. The eSCO connection is preferred and attempted
4353 * first; until it is successfully established, the link type
4354 * will be hinted as eSCO.
4355 */
4356 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4357 if (!conn)
4358 goto unlock;
4359 }
4360
4361 switch (ev->status) {
4362 case 0x00:
4363 /* The synchronous connection complete event should only be
4364 * sent once per new connection. Receiving a successful
4365 * complete event when the connection status is already
4366 * BT_CONNECTED means that the device is misbehaving and sent
4367 * multiple complete event packets for the same new connection.
4368 *
4369 * Registering the device more than once can corrupt kernel
4370 * memory, hence upon detecting this invalid event, we report
4371 * an error and ignore the packet.
4372 */
4373 if (conn->state == BT_CONNECTED) {
4374 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4375 goto unlock;
4376 }
4377
4378 conn->handle = __le16_to_cpu(ev->handle);
4379 conn->state = BT_CONNECTED;
4380 conn->type = ev->link_type;
4381
4382 hci_debugfs_create_conn(conn);
4383 hci_conn_add_sysfs(conn);
4384 break;
4385
4386 case 0x10: /* Connection Accept Timeout */
4387 case 0x0d: /* Connection Rejected due to Limited Resources */
4388 case 0x11: /* Unsupported Feature or Parameter Value */
4389 case 0x1c: /* SCO interval rejected */
4390 case 0x1a: /* Unsupported Remote Feature */
4391 case 0x1e: /* Invalid LMP Parameters */
4392 case 0x1f: /* Unspecified error */
4393 case 0x20: /* Unsupported LMP Parameter value */
4394 if (conn->out) {
4395 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4396 (hdev->esco_type & EDR_ESCO_MASK);
4397 if (hci_setup_sync(conn, conn->link->handle))
4398 goto unlock;
4399 }
4400 fallthrough;
4401
4402 default:
4403 conn->state = BT_CLOSED;
4404 break;
4405 }
4406
4407 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4408
4409 switch (ev->air_mode) {
4410 case 0x02:
4411 if (hdev->notify)
4412 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4413 break;
4414 case 0x03:
4415 if (hdev->notify)
4416 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4417 break;
4418 }
4419
4420 hci_connect_cfm(conn, ev->status);
4421 if (ev->status)
4422 hci_conn_del(conn);
4423
4424 unlock:
4425 hci_dev_unlock(hdev);
4426 }
4427
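/* Compute the significant length of an EIR blob. EIR is a sequence
 * of length-prefixed fields where the length octet covers the type
 * plus data, e.g. 02 01 06 (Flags) followed by
 * 06 09 'B' 'l' 'u' 'e' 'Z' (Complete Local Name "BlueZ");
 * a zero length octet terminates the significant part.
 */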
4428 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4429 {
4430 size_t parsed = 0;
4431
4432 while (parsed < eir_len) {
4433 u8 field_len = eir[0];
4434
4435 if (field_len == 0)
4436 return parsed;
4437
4438 parsed += field_len + 1;
4439 eir += field_len + 1;
4440 }
4441
4442 return eir_len;
4443 }
4444
4445 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4446 struct sk_buff *skb)
4447 {
4448 struct inquiry_data data;
4449 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4450 int num_rsp = *((__u8 *) skb->data);
4451 size_t eir_len;
4452
4453 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4454
4455 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4456 return;
4457
4458 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4459 return;
4460
4461 hci_dev_lock(hdev);
4462
4463 for (; num_rsp; num_rsp--, info++) {
4464 u32 flags;
4465 bool name_known;
4466
4467 bacpy(&data.bdaddr, &info->bdaddr);
4468 data.pscan_rep_mode = info->pscan_rep_mode;
4469 data.pscan_period_mode = info->pscan_period_mode;
4470 data.pscan_mode = 0x00;
4471 memcpy(data.dev_class, info->dev_class, 3);
4472 data.clock_offset = info->clock_offset;
4473 data.rssi = info->rssi;
4474 data.ssp_mode = 0x01;
4475
4476 if (hci_dev_test_flag(hdev, HCI_MGMT))
4477 name_known = eir_get_data(info->data,
4478 sizeof(info->data),
4479 EIR_NAME_COMPLETE, NULL);
4480 else
4481 name_known = true;
4482
4483 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4484
4485 eir_len = eir_get_length(info->data, sizeof(info->data));
4486
4487 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4488 info->dev_class, info->rssi,
4489 flags, info->data, eir_len, NULL, 0);
4490 }
4491
4492 hci_dev_unlock(hdev);
4493 }
4494
4495 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4496 struct sk_buff *skb)
4497 {
4498 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4499 struct hci_conn *conn;
4500
4501 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4502 __le16_to_cpu(ev->handle));
4503
4504 hci_dev_lock(hdev);
4505
4506 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4507 if (!conn)
4508 goto unlock;
4509
4510 /* For BR/EDR the necessary steps are taken through the
4511 * auth_complete event.
4512 */
4513 if (conn->type != LE_LINK)
4514 goto unlock;
4515
4516 if (!ev->status)
4517 conn->sec_level = conn->pending_sec_level;
4518
4519 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4520
4521 if (ev->status && conn->state == BT_CONNECTED) {
4522 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4523 hci_conn_drop(conn);
4524 goto unlock;
4525 }
4526
4527 if (conn->state == BT_CONFIG) {
4528 if (!ev->status)
4529 conn->state = BT_CONNECTED;
4530
4531 hci_connect_cfm(conn, ev->status);
4532 hci_conn_drop(conn);
4533 } else {
4534 hci_auth_cfm(conn, ev->status);
4535
4536 hci_conn_hold(conn);
4537 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4538 hci_conn_drop(conn);
4539 }
4540
4541 unlock:
4542 hci_dev_unlock(hdev);
4543 }
4544
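/* Combine local and remote SSP authentication requirements into
 * the value used for the IO Capability Reply: honour a remote
 * no-bonding request, and require MITM protection only when both
 * sides have IO capabilities that can support it.
 */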
4545 static u8 hci_get_auth_req(struct hci_conn *conn)
4546 {
4547 /* If remote requests no-bonding follow that lead */
4548 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4549 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4550 return conn->remote_auth | (conn->auth_type & 0x01);
4551
4552 /* If both remote and local have enough IO capabilities, require
4553 * MITM protection
4554 */
4555 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4556 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4557 return conn->remote_auth | 0x01;
4558
4559 /* No MITM protection possible so ignore remote requirement */
4560 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4561 }
4562
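/* Report whether usable remote OOB data is stored for this pairing,
 * using the HCI OOB data present encoding: 0x00 none, 0x01 P-192
 * values only, 0x02 P-256 values; in Secure Connections Only mode
 * the P-256 values are mandatory.
 */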
4563 static u8 bredr_oob_data_present(struct hci_conn *conn)
4564 {
4565 struct hci_dev *hdev = conn->hdev;
4566 struct oob_data *data;
4567
4568 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4569 if (!data)
4570 return 0x00;
4571
4572 if (bredr_sc_enabled(hdev)) {
4573 /* When Secure Connections is enabled, then just
4574 * return the present value stored with the OOB
4575 * data. The stored value contains the right present
4576 * information. However, it can only be trusted when
4577 * not in Secure Connections Only mode.
4578 */
4579 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4580 return data->present;
4581
4582 /* When Secure Connections Only mode is enabled, then
4583 * the P-256 values are required. If they are not
4584 * available, then do not declare that OOB data is
4585 * present.
4586 */
4587 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
4588 !crypto_memneq(data->hash256, ZERO_KEY, 16))
4589 return 0x00;
4590
4591 return 0x02;
4592 }
4593
4594 /* When Secure Connections is not enabled, or not actually
4595 * supported by the hardware, check if the P-192 data
4596 * values are present.
4597 */
4598 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
4599 !crypto_memneq(data->hash192, ZERO_KEY, 16))
4600 return 0x00;
4601
4602 return 0x01;
4603 }
4604
4605 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4606 {
4607 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4608 struct hci_conn *conn;
4609
4610 BT_DBG("%s", hdev->name);
4611
4612 hci_dev_lock(hdev);
4613
4614 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4615 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4616 goto unlock;
4617
4618 /* Assume remote supports SSP since it has triggered this event */
4619 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4620
4621 hci_conn_hold(conn);
4622
4623 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4624 goto unlock;
4625
4626 /* Allow pairing if we're pairable, if we are the initiators
4627 * of the pairing, or if the remote is not requesting bonding.
4628 */
4629 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4630 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4631 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4632 struct hci_cp_io_capability_reply cp;
4633
4634 bacpy(&cp.bdaddr, &ev->bdaddr);
4635 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
4636 * as KeyboardDisplay is not supported by the BT spec for BR/EDR. */
4637 cp.capability = (conn->io_capability == 0x04) ?
4638 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4639
4640 /* If we are initiators, there is no remote information yet */
4641 if (conn->remote_auth == 0xff) {
4642 /* Request MITM protection if our IO caps allow it
4643 * except for the no-bonding case.
4644 */
4645 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4646 conn->auth_type != HCI_AT_NO_BONDING)
4647 conn->auth_type |= 0x01;
4648 } else {
4649 conn->auth_type = hci_get_auth_req(conn);
4650 }
4651
4652 /* If we're not bondable, force one of the non-bondable
4653 * authentication requirement values.
4654 */
4655 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4656 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4657
4658 cp.authentication = conn->auth_type;
4659 cp.oob_data = bredr_oob_data_present(conn);
4660
4661 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4662 sizeof(cp), &cp);
4663 } else {
4664 struct hci_cp_io_capability_neg_reply cp;
4665
4666 bacpy(&cp.bdaddr, &ev->bdaddr);
4667 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4668
4669 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4670 sizeof(cp), &cp);
4671 }
4672
4673 unlock:
4674 hci_dev_unlock(hdev);
4675 }
4676
4677 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4678 {
4679 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4680 struct hci_conn *conn;
4681
4682 BT_DBG("%s", hdev->name);
4683
4684 hci_dev_lock(hdev);
4685
4686 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4687 if (!conn)
4688 goto unlock;
4689
4690 conn->remote_cap = ev->capability;
4691 conn->remote_auth = ev->authentication;
4692
4693 unlock:
4694 hci_dev_unlock(hdev);
4695 }
4696
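/* User Confirmation Request event: decide between auto-accepting
 * the numeric comparison and handing the decision to user space,
 * based on the MITM requirements of both sides and on whether a
 * link key for the peer already exists.
 */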
4697 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4698 struct sk_buff *skb)
4699 {
4700 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4701 int loc_mitm, rem_mitm, confirm_hint = 0;
4702 struct hci_conn *conn;
4703
4704 BT_DBG("%s", hdev->name);
4705
4706 hci_dev_lock(hdev);
4707
4708 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4709 goto unlock;
4710
4711 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4712 if (!conn)
4713 goto unlock;
4714
4715 loc_mitm = (conn->auth_type & 0x01);
4716 rem_mitm = (conn->remote_auth & 0x01);
4717
4718 /* If we require MITM but the remote device can't provide that
4719 * (it has NoInputNoOutput) then reject the confirmation
4720 * request. We check the security level here since it doesn't
4721 * necessarily match conn->auth_type.
4722 */
4723 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4724 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4725 BT_DBG("Rejecting request: remote device can't provide MITM");
4726 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4727 sizeof(ev->bdaddr), &ev->bdaddr);
4728 goto unlock;
4729 }
4730
4731 /* If no side requires MITM protection; auto-accept */
4732 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4733 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4734
4735 /* If we're not the initiators, request authorization to
4736 * proceed from user space (mgmt_user_confirm with
4737 * confirm_hint set to 1). The exception is if neither
4738 * side had MITM or if the local IO capability is
4739 * NoInputNoOutput, in which case we do auto-accept
4740 */
4741 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4742 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4743 (loc_mitm || rem_mitm)) {
4744 BT_DBG("Confirming auto-accept as acceptor");
4745 confirm_hint = 1;
4746 goto confirm;
4747 }
4748
4749 /* If a link key already exists in the local host, leave the
4750 * decision to user space since the remote device could be
4751 * legitimate or malicious.
4752 */
4753 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4754 bt_dev_dbg(hdev, "Local host already has link key");
4755 confirm_hint = 1;
4756 goto confirm;
4757 }
4758
4759 BT_DBG("Auto-accept of user confirmation with %ums delay",
4760 hdev->auto_accept_delay);
4761
4762 if (hdev->auto_accept_delay > 0) {
4763 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4764 queue_delayed_work(conn->hdev->workqueue,
4765 &conn->auto_accept_work, delay);
4766 goto unlock;
4767 }
4768
4769 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4770 sizeof(ev->bdaddr), &ev->bdaddr);
4771 goto unlock;
4772 }
4773
4774 confirm:
4775 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4776 le32_to_cpu(ev->passkey), confirm_hint);
4777
4778 unlock:
4779 hci_dev_unlock(hdev);
4780 }
4781
4782 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4783 struct sk_buff *skb)
4784 {
4785 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4786
4787 BT_DBG("%s", hdev->name);
4788
4789 if (hci_dev_test_flag(hdev, HCI_MGMT))
4790 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4791 }
4792
4793 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4794 struct sk_buff *skb)
4795 {
4796 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4797 struct hci_conn *conn;
4798
4799 BT_DBG("%s", hdev->name);
4800
4801 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4802 if (!conn)
4803 return;
4804
4805 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4806 conn->passkey_entered = 0;
4807
4808 if (hci_dev_test_flag(hdev, HCI_MGMT))
4809 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4810 conn->dst_type, conn->passkey_notify,
4811 conn->passkey_entered);
4812 }
4813
4814 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4815 {
4816 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4817 struct hci_conn *conn;
4818
4819 BT_DBG("%s", hdev->name);
4820
4821 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4822 if (!conn)
4823 return;
4824
4825 switch (ev->type) {
4826 case HCI_KEYPRESS_STARTED:
4827 conn->passkey_entered = 0;
4828 return;
4829
4830 case HCI_KEYPRESS_ENTERED:
4831 conn->passkey_entered++;
4832 break;
4833
4834 case HCI_KEYPRESS_ERASED:
4835 conn->passkey_entered--;
4836 break;
4837
4838 case HCI_KEYPRESS_CLEARED:
4839 conn->passkey_entered = 0;
4840 break;
4841
4842 case HCI_KEYPRESS_COMPLETED:
4843 return;
4844 }
4845
4846 if (hci_dev_test_flag(hdev, HCI_MGMT))
4847 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4848 conn->dst_type, conn->passkey_notify,
4849 conn->passkey_entered);
4850 }
4851
4852 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4853 struct sk_buff *skb)
4854 {
4855 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4856 struct hci_conn *conn;
4857
4858 BT_DBG("%s", hdev->name);
4859
4860 hci_dev_lock(hdev);
4861
4862 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4863 if (!conn || !hci_conn_ssp_enabled(conn))
4864 goto unlock;
4865
4866 /* Reset the authentication requirement to unknown */
4867 conn->remote_auth = 0xff;
4868
4869 /* To avoid duplicate auth_failed events to user space we check
4870 * the HCI_CONN_AUTH_PEND flag which will be set if we
4871 * initiated the authentication. A traditional auth_complete
4872 * event is always produced as initiator and is also mapped to
4873 * the mgmt_auth_failed event */
4874 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4875 mgmt_auth_failed(conn, ev->status);
4876
4877 hci_conn_drop(conn);
4878
4879 unlock:
4880 hci_dev_unlock(hdev);
4881 }
4882
4883 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4884 struct sk_buff *skb)
4885 {
4886 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4887 struct inquiry_entry *ie;
4888 struct hci_conn *conn;
4889
4890 BT_DBG("%s", hdev->name);
4891
4892 hci_dev_lock(hdev);
4893
4894 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4895 if (conn)
4896 memcpy(conn->features[1], ev->features, 8);
4897
4898 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4899 if (ie)
4900 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4901
4902 hci_dev_unlock(hdev);
4903 }
4904
4905 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4906 struct sk_buff *skb)
4907 {
4908 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4909 struct oob_data *data;
4910
4911 BT_DBG("%s", hdev->name);
4912
4913 hci_dev_lock(hdev);
4914
4915 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4916 goto unlock;
4917
4918 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4919 if (!data) {
4920 struct hci_cp_remote_oob_data_neg_reply cp;
4921
4922 bacpy(&cp.bdaddr, &ev->bdaddr);
4923 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4924 sizeof(cp), &cp);
4925 goto unlock;
4926 }
4927
4928 if (bredr_sc_enabled(hdev)) {
4929 struct hci_cp_remote_oob_ext_data_reply cp;
4930
4931 bacpy(&cp.bdaddr, &ev->bdaddr);
4932 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4933 memset(cp.hash192, 0, sizeof(cp.hash192));
4934 memset(cp.rand192, 0, sizeof(cp.rand192));
4935 } else {
4936 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4937 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4938 }
4939 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4940 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4941
4942 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4943 sizeof(cp), &cp);
4944 } else {
4945 struct hci_cp_remote_oob_data_reply cp;
4946
4947 bacpy(&cp.bdaddr, &ev->bdaddr);
4948 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4949 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4950
4951 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4952 sizeof(cp), &cp);
4953 }
4954
4955 unlock:
4956 hci_dev_unlock(hdev);
4957 }
4958
4959 #if IS_ENABLED(CONFIG_BT_HS)
4960 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4961 {
4962 struct hci_ev_channel_selected *ev = (void *)skb->data;
4963 struct hci_conn *hcon;
4964
4965 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4966
4967 skb_pull(skb, sizeof(*ev));
4968
4969 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4970 if (!hcon)
4971 return;
4972
4973 amp_read_loc_assoc_final_data(hdev, hcon);
4974 }
4975
4976 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4977 struct sk_buff *skb)
4978 {
4979 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4980 struct hci_conn *hcon, *bredr_hcon;
4981
4982 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4983 ev->status);
4984
4985 hci_dev_lock(hdev);
4986
4987 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4988 if (!hcon) {
4989 hci_dev_unlock(hdev);
4990 return;
4991 }
4992
4993 if (!hcon->amp_mgr) {
4994 hci_dev_unlock(hdev);
4995 return;
4996 }
4997
4998 if (ev->status) {
4999 hci_conn_del(hcon);
5000 hci_dev_unlock(hdev);
5001 return;
5002 }
5003
5004 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5005
5006 hcon->state = BT_CONNECTED;
5007 bacpy(&hcon->dst, &bredr_hcon->dst);
5008
5009 hci_conn_hold(hcon);
5010 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5011 hci_conn_drop(hcon);
5012
5013 hci_debugfs_create_conn(hcon);
5014 hci_conn_add_sysfs(hcon);
5015
5016 amp_physical_cfm(bredr_hcon, hcon);
5017
5018 hci_dev_unlock(hdev);
5019 }
5020
5021 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5022 {
5023 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5024 struct hci_conn *hcon;
5025 struct hci_chan *hchan;
5026 struct amp_mgr *mgr;
5027
5028 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5029 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5030 ev->status);
5031
5032 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5033 if (!hcon)
5034 return;
5035
5036 /* Create AMP hchan */
5037 hchan = hci_chan_create(hcon);
5038 if (!hchan)
5039 return;
5040
5041 hchan->handle = le16_to_cpu(ev->handle);
5042 hchan->amp = true;
5043
5044 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5045
5046 mgr = hcon->amp_mgr;
5047 if (mgr && mgr->bredr_chan) {
5048 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5049
5050 l2cap_chan_lock(bredr_chan);
5051
5052 bredr_chan->conn->mtu = hdev->block_mtu;
5053 l2cap_logical_cfm(bredr_chan, hchan, 0);
5054 hci_conn_hold(hcon);
5055
5056 l2cap_chan_unlock(bredr_chan);
5057 }
5058 }
5059
5060 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5061 struct sk_buff *skb)
5062 {
5063 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5064 struct hci_chan *hchan;
5065
5066 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5067 le16_to_cpu(ev->handle), ev->status);
5068
5069 if (ev->status)
5070 return;
5071
5072 hci_dev_lock(hdev);
5073
5074 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5075 if (!hchan || !hchan->amp)
5076 goto unlock;
5077
5078 amp_destroy_logical_link(hchan, ev->reason);
5079
5080 unlock:
5081 hci_dev_unlock(hdev);
5082 }
5083
5084 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5085 struct sk_buff *skb)
5086 {
5087 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5088 struct hci_conn *hcon;
5089
5090 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5091
5092 if (ev->status)
5093 return;
5094
5095 hci_dev_lock(hdev);
5096
5097 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5098 if (hcon && hcon->type == AMP_LINK) {
5099 hcon->state = BT_CLOSED;
5100 hci_disconn_cfm(hcon, ev->reason);
5101 hci_conn_del(hcon);
5102 }
5103
5104 hci_dev_unlock(hdev);
5105 }
5106 #endif
5107
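/* Reconstruct the initiator and responder addresses of a new LE
 * connection. A controller-reported Local RPA takes precedence;
 * otherwise the addresses are derived from the privacy settings
 * and the identity or random address in use.
 */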
5108 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5109 u8 bdaddr_type, bdaddr_t *local_rpa)
5110 {
5111 if (conn->out) {
5112 conn->dst_type = bdaddr_type;
5113 conn->resp_addr_type = bdaddr_type;
5114 bacpy(&conn->resp_addr, bdaddr);
5115
5116 /* If the controller has set a Local RPA then it must be
5117 * used instead of hdev->rpa.
5118 */
5119 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5120 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5121 bacpy(&conn->init_addr, local_rpa);
5122 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5123 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5124 bacpy(&conn->init_addr, &conn->hdev->rpa);
5125 } else {
5126 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5127 &conn->init_addr_type);
5128 }
5129 } else {
5130 conn->resp_addr_type = conn->hdev->adv_addr_type;
5131 /* If the controller has set a Local RPA then it must be
5132 * used instead of hdev->rpa.
5133 */
5134 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5135 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5136 bacpy(&conn->resp_addr, local_rpa);
5137 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5138 /* In case of ext adv, resp_addr will be updated in
5139 * Adv Terminated event.
5140 */
5141 if (!ext_adv_capable(conn->hdev))
5142 bacpy(&conn->resp_addr,
5143 &conn->hdev->random_addr);
5144 } else {
5145 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5146 }
5147
5148 conn->init_addr_type = bdaddr_type;
5149 bacpy(&conn->init_addr, bdaddr);
5150
5151 /* For incoming connections, set the default minimum
5152 * and maximum connection interval. They will be used
5153 * to check if the parameters are in range and if not
5154 * trigger the connection update procedure.
5155 */
5156 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5157 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5158 }
5159 }
5160
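/* Common handler for the legacy and enhanced LE Connection
 * Complete events: create or look up the hci_conn, resolve an RPA
 * back to the identity address via the stored IRK, and start the
 * remote features exchange where the role permits it.
 */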
5161 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5162 bdaddr_t *bdaddr, u8 bdaddr_type,
5163 bdaddr_t *local_rpa, u8 role, u16 handle,
5164 u16 interval, u16 latency,
5165 u16 supervision_timeout)
5166 {
5167 struct hci_conn_params *params;
5168 struct hci_conn *conn;
5169 struct smp_irk *irk;
5170 u8 addr_type;
5171
5172 hci_dev_lock(hdev);
5173
5174 /* All controllers implicitly stop advertising in the event of a
5175 * connection, so ensure that the state bit is cleared.
5176 */
5177 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5178
5179 conn = hci_lookup_le_connect(hdev);
5180 if (!conn) {
5181 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5182 if (!conn) {
5183 bt_dev_err(hdev, "no memory for new connection");
5184 goto unlock;
5185 }
5186
5187 conn->dst_type = bdaddr_type;
5188
5189 /* If we didn't have a hci_conn object previously
5190 * but we're in the central role, this must be something
5191 * initiated using an accept list. Since accept list based
5192 * connections are not "first class citizens" we don't
5193 * have full tracking of them. Therefore, we go ahead
5194 * with a "best effort" approach of determining the
5195 * initiator address based on the HCI_PRIVACY flag.
5196 */
5197 if (conn->out) {
5198 conn->resp_addr_type = bdaddr_type;
5199 bacpy(&conn->resp_addr, bdaddr);
5200 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5201 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5202 bacpy(&conn->init_addr, &hdev->rpa);
5203 } else {
5204 hci_copy_identity_address(hdev,
5205 &conn->init_addr,
5206 &conn->init_addr_type);
5207 }
5208 }
5209 } else {
5210 cancel_delayed_work(&conn->le_conn_timeout);
5211 }
5212
5213 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5214
5215 /* Lookup the identity address from the stored connection
5216 * address and address type.
5217 *
5218 * When establishing connections to an identity address, the
5219 * connection procedure will store the resolvable random
5220 * address first. Now if it can be converted back into the
5221 * identity address, start using the identity address from
5222 * now on.
5223 */
5224 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5225 if (irk) {
5226 bacpy(&conn->dst, &irk->bdaddr);
5227 conn->dst_type = irk->addr_type;
5228 }
5229
5230 if (status) {
5231 hci_le_conn_failed(conn, status);
5232 goto unlock;
5233 }
5234
5235 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5236 addr_type = BDADDR_LE_PUBLIC;
5237 else
5238 addr_type = BDADDR_LE_RANDOM;
5239
5240 /* Drop the connection if the device is blocked */
5241 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5242 hci_conn_drop(conn);
5243 goto unlock;
5244 }
5245
5246 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5247 mgmt_device_connected(hdev, conn, 0, NULL, 0);
5248
5249 conn->sec_level = BT_SECURITY_LOW;
5250 conn->handle = handle;
5251 conn->state = BT_CONFIG;
5252
5253 conn->le_conn_interval = interval;
5254 conn->le_conn_latency = latency;
5255 conn->le_supv_timeout = supervision_timeout;
5256
5257 hci_debugfs_create_conn(conn);
5258 hci_conn_add_sysfs(conn);
5259
5260 /* The remote features procedure is defined for the master
5261 * role only, so request the remote features only for an
5262 * initiated (outgoing) connection.
5263 *
5264 * If the local controller supports slave-initiated features
5265 * exchange, then requesting the remote features in slave
5266 * role is possible. Otherwise just transition into the
5267 * connected state without requesting the remote features.
5268 */
5269 if (conn->out ||
5270 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
5271 struct hci_cp_le_read_remote_features cp;
5272
5273 cp.handle = __cpu_to_le16(conn->handle);
5274
5275 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5276 sizeof(cp), &cp);
5277
5278 hci_conn_hold(conn);
5279 } else {
5280 conn->state = BT_CONNECTED;
5281 hci_connect_cfm(conn, status);
5282 }
5283
5284 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5285 conn->dst_type);
5286 if (params) {
5287 list_del_init(&params->action);
5288 if (params->conn) {
5289 hci_conn_drop(params->conn);
5290 hci_conn_put(params->conn);
5291 params->conn = NULL;
5292 }
5293 }
5294
5295 unlock:
5296 hci_update_background_scan(hdev);
5297 hci_dev_unlock(hdev);
5298 }
5299
5300 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5301 {
5302 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5303
5304 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5305
5306 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5307 NULL, ev->role, le16_to_cpu(ev->handle),
5308 le16_to_cpu(ev->interval),
5309 le16_to_cpu(ev->latency),
5310 le16_to_cpu(ev->supervision_timeout));
5311 }
5312
5313 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5314 struct sk_buff *skb)
5315 {
5316 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5317
5318 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5319
5320 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5321 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5322 le16_to_cpu(ev->interval),
5323 le16_to_cpu(ev->latency),
5324 le16_to_cpu(ev->supervision_timeout));
5325
5326 if (use_ll_privacy(hdev) &&
5327 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5328 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5329 hci_req_disable_address_resolution(hdev);
5330 }
5331
5332 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5333 {
5334 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5335 struct hci_conn *conn;
5336
5337 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5338
5339 if (ev->status) {
5340 struct adv_info *adv;
5341
5342 adv = hci_find_adv_instance(hdev, ev->handle);
5343 if (!adv)
5344 return;
5345
5346 /* Remove advertising as it has been terminated */
5347 hci_remove_adv_instance(hdev, ev->handle);
5348 mgmt_advertising_removed(NULL, hdev, ev->handle);
5349
5350 return;
5351 }
5352
5353 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5354 if (conn) {
5355 struct adv_info *adv_instance;
5356
5357 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5358 bacmp(&conn->resp_addr, BDADDR_ANY))
5359 return;
5360
5361 if (!hdev->cur_adv_instance) {
5362 bacpy(&conn->resp_addr, &hdev->random_addr);
5363 return;
5364 }
5365
5366 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5367 if (adv_instance)
5368 bacpy(&conn->resp_addr, &adv_instance->random_addr);
5369 }
5370 }
5371
5372 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5373 struct sk_buff *skb)
5374 {
5375 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5376 struct hci_conn *conn;
5377
5378 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5379
5380 if (ev->status)
5381 return;
5382
5383 hci_dev_lock(hdev);
5384
5385 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5386 if (conn) {
5387 conn->le_conn_interval = le16_to_cpu(ev->interval);
5388 conn->le_conn_latency = le16_to_cpu(ev->latency);
5389 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5390 }
5391
5392 hci_dev_unlock(hdev);
5393 }
5394
5395 /* This function requires the caller holds hdev->lock */
5396 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5397 bdaddr_t *addr,
5398 u8 addr_type, u8 adv_type,
5399 bdaddr_t *direct_rpa)
5400 {
5401 struct hci_conn *conn;
5402 struct hci_conn_params *params;
5403
5404 /* If the event is not connectable don't proceed further */
5405 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5406 return NULL;
5407
5408 /* Ignore if the device is blocked */
5409 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5410 return NULL;
5411
5412 /* Most controllers will fail if we try to create new connections
5413 * while we have an existing one in slave role.
5414 */
5415 if (hdev->conn_hash.le_num_slave > 0 &&
5416 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5417 !(hdev->le_states[3] & 0x10)))
5418 return NULL;
5419
5420 /* If we're not connectable only connect devices that we have in
5421 * our pend_le_conns list.
5422 */
5423 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5424 addr_type);
5425 if (!params)
5426 return NULL;
5427
5428 if (!params->explicit_connect) {
5429 switch (params->auto_connect) {
5430 case HCI_AUTO_CONN_DIRECT:
5431 /* Only devices advertising with ADV_DIRECT_IND trigger a
5432 * connection attempt. This allows incoming connections
5433 * from slave devices.
5434 */
5435 if (adv_type != LE_ADV_DIRECT_IND)
5436 return NULL;
5437 break;
5438 case HCI_AUTO_CONN_ALWAYS:
5439 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5440 * trigger a connection attempt. This means that incoming
5441 * connections from slave devices are accepted and also
5442 * outgoing connections to slave devices are established
5443 * when found.
5444 */
5445 break;
5446 default:
5447 return NULL;
5448 }
5449 }
5450
5451 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5452 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5453 direct_rpa);
5454 if (!IS_ERR(conn)) {
5455 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5456 * by the higher layer that tried to connect; if not, then
5457 * store the pointer since we don't really have any
5458 * other owner of the object besides the params that
5459 * triggered it. This way we can abort the connection if
5460 * the parameters get removed and keep the reference
5461 * count consistent once the connection is established.
5462 */
5463
5464 if (!params->explicit_connect)
5465 params->conn = hci_conn_get(conn);
5466
5467 return conn;
5468 }
5469
5470 switch (PTR_ERR(conn)) {
5471 case -EBUSY:
5472 /* If hci_connect() returns -EBUSY it means there is already
5473 * an LE connection attempt going on. Since controllers don't
5474 * support more than one connection attempt at the time, we
5475 * don't consider this an error case.
5476 */
5477 break;
5478 default:
5479 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5480 return NULL;
5481 }
5482
5483 return NULL;
5484 }
5485
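/* Core of LE advertising report handling: trim padded or bogus
 * report data, resolve the advertiser address, trigger any pending
 * connection, and merge ADV_IND/ADV_SCAN_IND reports with their
 * SCAN_RSP before sending a single device found event.
 */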
5486 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5487 u8 bdaddr_type, bdaddr_t *direct_addr,
5488 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5489 bool ext_adv)
5490 {
5491 struct discovery_state *d = &hdev->discovery;
5492 struct smp_irk *irk;
5493 struct hci_conn *conn;
5494 bool match;
5495 u32 flags;
5496 u8 *ptr;
5497
5498 switch (type) {
5499 case LE_ADV_IND:
5500 case LE_ADV_DIRECT_IND:
5501 case LE_ADV_SCAN_IND:
5502 case LE_ADV_NONCONN_IND:
5503 case LE_ADV_SCAN_RSP:
5504 break;
5505 default:
5506 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5507 "type: 0x%02x", type);
5508 return;
5509 }
5510
5511 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5512 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5513 return;
5514 }
5515
5516 /* Find the end of the data in case the report contains padded zero
5517 * bytes at the end causing an invalid length value.
5518 *
5519 * When data is NULL, len is 0 so there is no need for extra ptr
5520 * check as 'ptr < data + 0' is already false in that case.
5521 */
5522 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5523 if (ptr + 1 + *ptr > data + len)
5524 break;
5525 }
5526
5527 /* Adjust for actual length. This handles the case when the
5528 * remote device is advertising with an incorrect data length.
5529 */
5530 len = ptr - data;
5531
5532 /* If the direct address is present, then this report is from
5533 * a LE Direct Advertising Report event. In that case it is
5534 * important to see if the address is matching the local
5535 * controller address.
5536 */
5537 if (direct_addr) {
5538 /* Only resolvable random addresses are valid for these
5539 * kind of reports and others can be ignored.
5540 */
5541 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5542 return;
5543
5544 /* If the controller is not using resolvable random
5545 * addresses, then this report can be ignored.
5546 */
5547 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5548 return;
5549
5550 /* If the local IRK of the controller does not match
5551 * with the resolvable random address provided, then
5552 * this report can be ignored.
5553 */
5554 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5555 return;
5556 }
5557
5558 /* Check if we need to convert to identity address */
5559 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5560 if (irk) {
5561 bdaddr = &irk->bdaddr;
5562 bdaddr_type = irk->addr_type;
5563 }
5564
5565 /* Check if we have been requested to connect to this device.
5566 *
5567 * direct_addr is set only for directed advertising reports (it is NULL
5568 * for advertising reports) and is already verified to be RPA above.
5569 */
5570 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5571 direct_addr);
5572 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5573 /* Store report for later inclusion by
5574 * mgmt_device_connected
5575 */
5576 memcpy(conn->le_adv_data, data, len);
5577 conn->le_adv_data_len = len;
5578 }
5579
5580 /* Passive scanning shouldn't trigger any device found events,
5581 * except for devices marked as CONN_REPORT for which we do send
5582 * device found events, or advertisement monitoring requested.
5583 */
5584 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5585 if (type == LE_ADV_DIRECT_IND)
5586 return;
5587
5588 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5589 bdaddr, bdaddr_type) &&
5590 idr_is_empty(&hdev->adv_monitors_idr))
5591 return;
5592
5593 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5594 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5595 else
5596 flags = 0;
5597 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5598 rssi, flags, data, len, NULL, 0);
5599 return;
5600 }
5601
5602 /* When receiving non-connectable or scannable undirected
5603 * advertising reports, the remote device is known not to be
5604 * connectable, and this is indicated clearly in the device
5605 * found event.
5606 *
5607 * When receiving a scan response, then there is no way to
5608 * know if the remote device is connectable or not. However
5609 * since scan responses are merged with a previously seen
5610 * advertising report, the flags field from that report
5611 * will be used.
5612 *
5613 * In the really unlikely case that a controller gets confused
5614 * and just sends a scan response event, then it is marked as
5615 * not connectable as well.
5616 */
5617 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5618 type == LE_ADV_SCAN_RSP)
5619 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5620 else
5621 flags = 0;
5622
5623 /* If there's nothing pending either store the data from this
5624 * event or send an immediate device found event if the data
5625 * should not be stored for later.
5626 */
5627 if (!ext_adv && !has_pending_adv_report(hdev)) {
5628 /* If the report will trigger a SCAN_REQ store it for
5629 * later merging.
5630 */
5631 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5632 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5633 rssi, flags, data, len);
5634 return;
5635 }
5636
5637 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5638 rssi, flags, data, len, NULL, 0);
5639 return;
5640 }
5641
5642 /* Check if the pending report is for the same device as the new one */
5643 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5644 bdaddr_type == d->last_adv_addr_type);
5645
5646 /* If the pending data doesn't match this report or this isn't a
5647 * scan response (e.g. we got a duplicate ADV_IND) then force
5648 * sending of the pending data.
5649 */
5650 if (type != LE_ADV_SCAN_RSP || !match) {
5651 /* Send out whatever is in the cache, but skip duplicates */
5652 if (!match)
5653 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5654 d->last_adv_addr_type, NULL,
5655 d->last_adv_rssi, d->last_adv_flags,
5656 d->last_adv_data,
5657 d->last_adv_data_len, NULL, 0);
5658
5659 /* If the new report will trigger a SCAN_REQ store it for
5660 * later merging.
5661 */
5662 if (!ext_adv && (type == LE_ADV_IND ||
5663 type == LE_ADV_SCAN_IND)) {
5664 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5665 rssi, flags, data, len);
5666 return;
5667 }
5668
5669 /* The advertising reports cannot be merged, so clear
5670 * the pending report and send out a device found event.
5671 */
5672 clear_pending_adv_report(hdev);
5673 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5674 rssi, flags, data, len, NULL, 0);
5675 return;
5676 }
5677
5678 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5679 * the new event is a SCAN_RSP. We can therefore proceed with
5680 * sending a merged device found event.
5681 */
5682 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5683 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5684 d->last_adv_data, d->last_adv_data_len, data, len);
5685 clear_pending_adv_report(hdev);
5686 }
5687
5688 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5689 {
5690 u8 num_reports = skb->data[0];
5691 void *ptr = &skb->data[1];
5692
5693 hci_dev_lock(hdev);
5694
5695 while (num_reports--) {
5696 struct hci_ev_le_advertising_info *ev = ptr;
5697 s8 rssi;
5698
5699 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
5700 bt_dev_err(hdev, "Malicious advertising data.");
5701 break;
5702 }
5703
5704 if (ev->length <= HCI_MAX_AD_LENGTH &&
5705 ev->data + ev->length <= skb_tail_pointer(skb)) {
5706 rssi = ev->data[ev->length];
5707 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5708 ev->bdaddr_type, NULL, 0, rssi,
5709 ev->data, ev->length, false);
5710 } else {
5711 bt_dev_err(hdev, "Dropping invalid advertising data");
5712 }
5713
5714 ptr += sizeof(*ev) + ev->length + 1;
5715 }
5716
5717 hci_dev_unlock(hdev);
5718 }
5719
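/* Extended advertising reports describe the received PDU with a 16-bit
 * event type bit field instead of the single legacy event type byte.
 * When the controller sets the legacy PDU bit, the remaining bits
 * encode one of the fixed legacy PDU combinations; otherwise the
 * connectable, scannable, directed and scan response properties are
 * reported as individual flags. Fold both encodings back into the
 * legacy LE_ADV_* values that process_adv_report() understands.
 */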
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		goto invalid;
	}

	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

invalid:
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
			       evt_type);

	return LE_ADV_INVALID;
}

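/* Unlike the legacy report, the extended report structure carries the
 * RSSI inside its fixed header, so there is no trailing RSSI byte to
 * skip when walking the report list. Reports built from non-legacy
 * PDUs are passed on with ext_adv set so that process_adv_report()
 * does not apply the legacy store-and-merge handling to them.
 */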
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_ext_adv_report *ev = ptr;
		u8 legacy_evt_type;
		u16 evt_type;

		evt_type = __le16_to_cpu(ev->evt_type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, ev->rssi,
					   ev->data, ev->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
		}

		ptr += sizeof(*ev) + ev->length;
	}

	hci_dev_unlock(hdev);
}

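/* LE Read Remote Features Complete ends the feature exchange that was
 * started while the connection was still in BT_CONFIG. Only once the
 * remote feature page is known (or the exchange has definitely failed)
 * is the connection moved to BT_CONNECTED and the result confirmed to
 * the upper layers through hci_connect_cfm().
 */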
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

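/* The controller raises an LE Long Term Key Request when the remote
 * device initiates encryption: the host must look up a key matching
 * the advertised EDiv/Rand pair (both zero for LE Secure Connections
 * keys) and answer with HCI_OP_LE_LTK_REPLY, or send the negative
 * reply so the remote side sees the encryption attempt fail.
 */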
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

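/* LE Remote Connection Parameter Request: the peer proposes a new
 * connection interval range (1.25 ms units), slave latency and
 * supervision timeout (10 ms units). Requests for unknown handles or
 * out-of-range values are rejected; anything else is accepted as-is
 * and, when we are master, also forwarded to the management interface
 * so userspace can decide whether to store the new parameters.
 */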
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

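/* Directed advertising reports are typically generated when scanning
 * with a filter policy that passes through directed PDUs whose target
 * address the controller could not resolve itself. They carry no AD
 * payload (hence the NULL data below) but do include the address the
 * advertiser was directing at, so it can be matched against our own
 * identity.
 */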
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
		return;

	hci_dev_lock(hdev);

	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,
				   false);

	hci_dev_unlock(hdev);
}

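/* LE PHY Update Complete reports which transmitter and receiver PHYs
 * (1M, 2M or Coded) the link uses after a PHY change procedure has
 * finished.
 */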
static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}

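/* All LE controller events share the single HCI_EV_LE_META event code;
 * the actual event is identified by the subevent byte following the
 * event header. Strip the meta header here so every subevent handler
 * finds its own parameters at skb->data.
 */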
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}

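/* Verify that the skb saved for a pending request really contains the
 * Command Complete event for the given opcode (or, if a specific
 * completion event was requested, exactly that event), so that
 * req_complete_skb callbacks are never handed an unrelated event.
 */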
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

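/* While suspended the controller is expected to stay quiet, so the
 * first event that arrives is treated as the one that woke the host.
 * Record the wake reason and, for connection and advertising events,
 * the address of the remote device responsible, so that userspace can
 * query them through the management interface.
 */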
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_report *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 * The bdaddr must be read through the struct matching the received
	 * event, since it sits at a different offset in each.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

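/* Entry point for every HCI event received from the controller. If the
 * event may complete the currently pending command or request, a
 * pristine clone of the skb is kept first, since the individual event
 * handlers consume the original through skb_pull() and friends.
 */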
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;

		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}