1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 18
44
/* Opcodes that a trusted (privileged) management socket is allowed to
 * invoke. Advertised to user space via MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
};
126
/* Events delivered to trusted management sockets. Advertised to user
 * space via MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
169
/* Subset of opcodes allowed for untrusted sockets: read-only
 * information commands with no security impact.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
182
/* Subset of events that may be delivered to untrusted sockets
 * (no key material or per-device pairing data).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};
199
200 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
201
202 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
203 "\x00\x00\x00\x00\x00\x00\x00\x00"
204
205 /* HCI to MGMT error code conversion table */
/* HCI to MGMT error code conversion table.
 *
 * Indexed by the raw HCI status code (0x00 upwards); each entry is the
 * corresponding MGMT_STATUS_* value. Codes beyond the end of the table
 * are mapped to MGMT_STATUS_FAILED by mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
272
/* Translate a raw HCI status code into its MGMT_STATUS_* equivalent.
 * Codes outside the conversion table fall back to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status >= ARRAY_SIZE(mgmt_status_table))
		return MGMT_STATUS_FAILED;

	return mgmt_status_table[hci_status];
}
280
/* Broadcast an event on the control channel to every socket matching
 * @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
287
/* Broadcast an event on the control channel to sockets matching @flag,
 * except @skip_sk (typically the originator of the change).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
294
/* Broadcast an event on the control channel to trusted sockets only,
 * skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
301
/* Map a mgmt-layer LE address type to the corresponding HCI address
 * type. Anything other than public is treated as random.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
309
/* Fill a mgmt_rp_read_version structure with the interface version.
 * Takes void * so callers can pass a raw reply buffer.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
317
/* MGMT_OP_READ_VERSION handler: reply with the management interface
 * version/revision. @data and @data_len are unused (no parameters).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
330
/* MGMT_OP_READ_COMMANDS handler: reply with the list of supported
 * command and event opcodes. Trusted sockets get the full tables,
 * untrusted sockets the read-only subsets.
 *
 * Returns 0 on success or a negative errno.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *cmds, *evts;
	u16 num_commands, num_events;
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the opcode tables based on the socket's trust level */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		cmds = mgmt_commands;
		evts = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		cmds = mgmt_untrusted_commands;
		evts = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Commands first, then events, packed little-endian */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(cmds[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(evts[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
382
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all configured
 * primary controllers. @hdev is only used for debug logging.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries, used to size
	 * the reply buffer. The second pass applies extra filters on top
	 * of this condition, so it can only yield fewer entries.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, skipping controllers still in
	 * setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length with the final (possibly smaller) count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
442
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only *unconfigured* primary controllers.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply; second pass can only shrink this */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, skipping controllers still in
	 * setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length with the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
502
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all primary and AMP
 * controllers, each tagged with a type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP) and its bus.
 *
 * Side effect: switches this socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply; the second pass filters further */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
576
is_configured(struct hci_dev *hdev)577 static bool is_configured(struct hci_dev *hdev)
578 {
579 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
580 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
581 return false;
582
583 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
584 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
585 !bacmp(&hdev->public_addr, BDADDR_ANY))
586 return false;
587
588 return true;
589 }
590
/* Return the little-endian bitmask of configuration options that are
 * still outstanding (mirrors the checks in is_configured()).
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration required but not yet completed */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address is required but none has been set */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
606
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask, skipping the socket that triggered the change.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
614
/* Complete @opcode with the current missing-options mask as the reply */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
622
/* MGMT_OP_READ_CONFIG_INFO handler: reply with manufacturer id plus
 * the supported and still-missing configuration option masks.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only possible with a set_bdaddr
	 * driver callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
650
/* Build the MGMT_PHY_* bitmask of PHYs the controller supports, derived
 * from its LMP/LE feature bits. The nesting mirrors the dependency
 * chain: EDR slot variants only exist when the base EDR rate does.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, 1 slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
702
/* Build the MGMT_PHY_* bitmask of currently selected PHYs. For BR/EDR
 * the selection is derived from hdev->pkt_type; note that EDR packet
 * type bits are inverted ("do not use"), hence the negated tests. LE
 * selection comes from the default TX/RX PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits in pkt_type mean "shall NOT be used" */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
765
get_configurable_phys(struct hci_dev *hdev)766 static u32 get_configurable_phys(struct hci_dev *hdev)
767 {
768 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
769 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
770 }
771
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, based on its LMP/LE capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always-available settings */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable relies on interlaced page scan (1.2+) */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed needs both SSP and CONFIG_BT_HS */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
823
/* Build the MGMT_SETTING_* bitmask reflecting the controller's current
 * state, mapping each hdev flag to its corresponding setting bit.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
894
/* Find a pending mgmt command on the control channel by opcode */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
899
/* Find a pending mgmt command on the control channel by opcode and
 * its associated user data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
906
mgmt_get_adv_discov_flags(struct hci_dev *hdev)907 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
908 {
909 struct mgmt_pending_cmd *cmd;
910
911 /* If there's a pending mgmt command the flags will not yet have
912 * their final values, so check for this first.
913 */
914 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
915 if (cmd) {
916 struct mgmt_mode *cp = cmd->param;
917 if (cp->val == 0x01)
918 return LE_AD_GENERAL;
919 else if (cp->val == 0x02)
920 return LE_AD_LIMITED;
921 } else {
922 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
923 return LE_AD_LIMITED;
924 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
925 return LE_AD_GENERAL;
926 }
927
928 return 0;
929 }
930
mgmt_get_connectable(struct hci_dev *hdev)931 bool mgmt_get_connectable(struct hci_dev *hdev)
932 {
933 struct mgmt_pending_cmd *cmd;
934
935 /* If there's a pending mgmt command the flag will not yet have
936 * it's final value, so check for this first.
937 */
938 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
939 if (cmd) {
940 struct mgmt_mode *cp = cmd->param;
941
942 return cp->val;
943 }
944
945 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
946 }
947
/* Delayed work: when the service cache timeout expires, push the
 * accumulated EIR and class-of-device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache was still armed; clears the flag */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
968
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is active, restart it so a fresh RPA gets generated.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh unless advertising is currently enabled */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
993
/* One-time per-controller mgmt initialization, performed the first
 * time a mgmt socket touches the device. Idempotent: guarded by the
 * HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1009
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * version, supported/current settings, device class and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1039
/* Append EIR structures describing the controller to @eir: class of
 * device (BR/EDR only), appearance (LE only) and the complete/short
 * local names.  Returns the total number of bytes written.
 *
 * NOTE(review): no bounds checking is done here; callers pass buffers
 * they size themselves (512 bytes in this file) — assumed sufficient
 * for the maximum name lengths, TODO confirm against the name limits.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1063
/* Handler for MGMT_OP_READ_EXT_INFO: like read_controller_info() but
 * with a variable-length EIR blob appended.  Also switches this socket
 * over to extended info events (disabling the legacy class-of-device
 * and local-name change events).
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1103
/* Broadcast an Extended Controller Information Changed event with the
 * current EIR data, limited to sockets that opted into the extended
 * info events; @skip (may be NULL) is excluded from delivery.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1119
/* Send a successful command-complete reply for @opcode carrying the
 * controller's current settings bitmask (little endian).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings;

	settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1127
/* HCI request completion callback for clean_up_hci_state().  Once all
 * connections are gone, the pending delayed power-off is replaced by an
 * immediate power-off on the request workqueue.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1137
/* Emit the Advertising Added mgmt event for @instance; @sk (the
 * originating socket) is skipped from delivery.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1146
/* Emit the Advertising Removed mgmt event for @instance; @sk (the
 * originating socket) is skipped from delivery.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1156
cancel_adv_timeout(struct hci_dev *hdev)1157 static void cancel_adv_timeout(struct hci_dev *hdev)
1158 {
1159 if (hdev->adv_instance_timeout) {
1160 hdev->adv_instance_timeout = 0;
1161 cancel_delayed_work(&hdev->adv_instance_expire);
1162 }
1163 }
1164
/* Build and run one HCI request that quiesces the controller before
 * power off: disable page/inquiry scan, remove advertising instances,
 * disable advertising, stop discovery and abort every connection.
 * Returns the hci_req_run() result (-ENODATA if nothing was queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1198
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 *
 * Rejects invalid values and concurrent SET_POWERED commands.  If the
 * requested state already matches, the current settings are returned
 * immediately.  Power-on is handed to the request workqueue; power-off
 * first quiesces the controller via clean_up_hci_state() and arms a
 * delayed power-off as a safety net.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already active: just echo current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1253
new_settings(struct hci_dev *hdev, struct sock *skip)1254 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1255 {
1256 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1257
1258 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1259 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1260 }
1261
/* Public wrapper: broadcast New Settings without excluding any socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1266
/* Shared context for mgmt_pending_foreach() callbacks (see
 * settings_rsp() below).
 */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket, held via sock_hold() */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* status to report; usage depends on callback */
};
1272
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings, detach it from the pending list and free it.  The
 * first command's socket is stashed (with a reference) in match->sk so
 * the caller can later skip it when broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* list_del() above already unlinked the command, so free directly */
	mgmt_pending_free(cmd);
}
1288
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1296
cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)1297 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1298 {
1299 if (cmd->cmd_complete) {
1300 u8 *status = data;
1301
1302 cmd->cmd_complete(cmd, *status);
1303 mgmt_pending_remove(cmd);
1304
1305 return;
1306 }
1307
1308 cmd_status_rsp(cmd, data);
1309 }
1310
/* Generic cmd_complete handler: echo the command's own parameters back
 * in the command-complete reply.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1316
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1322
mgmt_bredr_support(struct hci_dev *hdev)1323 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1324 {
1325 if (!lmp_bredr_capable(hdev))
1326 return MGMT_STATUS_NOT_SUPPORTED;
1327 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1328 return MGMT_STATUS_REJECTED;
1329 else
1330 return MGMT_STATUS_SUCCESS;
1331 }
1332
mgmt_le_support(struct hci_dev *hdev)1333 static u8 mgmt_le_support(struct hci_dev *hdev)
1334 {
1335 if (!lmp_le_capable(hdev))
1336 return MGMT_STATUS_NOT_SUPPORTED;
1337 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1338 return MGMT_STATUS_REJECTED;
1339 else
1340 return MGMT_STATUS_SUCCESS;
1341 }
1342
/* Completion handler for the Set Discoverable HCI work.  On failure the
 * pending command is answered with the mapped error and the limited
 * discoverable flag is rolled back; on success the discoverable timeout
 * (if any) is armed, the caller gets its settings response, and New
 * Settings is broadcast to everyone else.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the discoverable timeout now that the mode change succeeded */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1377
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * val: 0x00 = off (timeout must be 0), 0x01 = general discoverable,
 * 0x02 = limited discoverable (timeout required).  Requires at least
 * one of BR/EDR or LE to be enabled and the connectable setting to be
 * on.  When powered off, only the flag is toggled; when powered on,
 * the actual mode change is deferred to the discoverable_update work
 * and completed in mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1507
/* Completion handler for the Set Connectable HCI work: answer the
 * pending command (error status on failure, settings response on
 * success) and broadcast New Settings to the other sockets.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1535
/* Flag-only path for Set Connectable (used when the controller is not
 * powered).  Disabling connectable also clears discoverable, since
 * discoverable depends on it.  If anything changed, the scan state and
 * background scanning are refreshed and New Settings is broadcast.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1564
/* Handler for MGMT_OP_SET_CONNECTABLE: toggle page scan / connectable
 * advertising.  Requires BR/EDR or LE to be enabled.  Powered-off
 * controllers take the flag-only fast path; otherwise the change is
 * queued as connectable_update work and completed asynchronously in
 * mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverability */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1621
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag.  No
 * HCI traffic is needed except in limited privacy mode, where changing
 * bondable may alter the advertised address and therefore requeues the
 * discoverable update work.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1664
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR authentication
 * (HCI Write Authentication Enable).  Powered-off controllers only
 * flip the HCI_LINK_SECURITY flag; powered controllers issue the HCI
 * command, with the pending entry answered from its complete handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1733
/* Handler for MGMT_OP_SET_SSP: toggle Secure Simple Pairing.  Requires
 * an SSP-capable BR/EDR controller.  Powered-off controllers only flip
 * flags (disabling SSP also disables High Speed); powered controllers
 * send HCI Write Simple Pairing Mode, first turning off SSP debug mode
 * when SSP is being disabled while debug keys are in use.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* SSP off implies HS off; report a change if
			 * either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Best-effort: turn SSP debug mode off before disabling SSP */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1814
/* Handler for MGMT_OP_SET_HS: toggle High Speed (AMP) support.  Purely
 * a host-side flag, so no HCI traffic is generated.  Requires
 * CONFIG_BT_HS, an SSP-capable BR/EDR controller and SSP enabled.
 * Disabling HS while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An SSP change in flight could invalidate the HS state */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1875
/* HCI completion handler for the Set LE request: answer all pending
 * SET_LE commands (errors on failure, settings responses on success),
 * broadcast New Settings, and if LE ended up enabled, refresh the
 * default advertising/scan-response data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* match.sk was filled in (and held) by settings_rsp() */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1922
/* Handler for MGMT_OP_SET_LE: enable or disable LE support on a
 * dual-mode controller via HCI Write LE Host Supported.
 *
 * LE-only controllers cannot have LE switched off (enable requests are
 * acknowledged gracefully, disable is rejected).  When the controller
 * is powered off, or the host LE support already matches, only flags
 * are updated.  Disabling LE also tears down advertising state first.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before LE support goes away */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2029
2030 /* This is a helper function to test for pending mgmt commands that can
2031 * cause CoD or EIR HCI commands. We can only allow one such pending
2032 * mgmt command at a time since otherwise we cannot easily track what
2033 * the current values are, will be, and based on that calculate if a new
2034 * HCI command needs to be sent and if yes with what value.
2035 */
pending_eir_or_class(struct hci_dev *hdev)2036 static bool pending_eir_or_class(struct hci_dev *hdev)
2037 {
2038 struct mgmt_pending_cmd *cmd;
2039
2040 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2041 switch (cmd->opcode) {
2042 case MGMT_OP_ADD_UUID:
2043 case MGMT_OP_REMOVE_UUID:
2044 case MGMT_OP_SET_DEV_CLASS:
2045 case MGMT_OP_SET_POWERED:
2046 return true;
2047 }
2048 }
2049
2050 return false;
2051 }
2052
/* Bluetooth Base UUID in little-endian byte order; bytes 12-15 carry
 * the 16/32-bit shortened UUID value (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2057
get_uuid_size(const u8 *uuid)2058 static u8 get_uuid_size(const u8 *uuid)
2059 {
2060 u32 val;
2061
2062 if (memcmp(uuid, bluetooth_base_uuid, 12))
2063 return 128;
2064
2065 val = get_unaligned_le32(&uuid[12]);
2066 if (val > 0xffff)
2067 return 32;
2068
2069 return 16;
2070 }
2071
/* Answer the pending command @mgmt_op with the (possibly updated)
 * class of device once the associated HCI request has finished.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2090
/* HCI request completion callback for add_uuid(). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2097
/* Handler for MGMT_OP_ADD_UUID: record a new service UUID and queue
 * HCI commands to update the class of device and EIR data.  Only one
 * EIR/class-affecting command may be pending at a time.  If no HCI
 * commands end up queued (-ENODATA), the reply is sent immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI work was needed; complete synchronously */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2155
enable_service_cache(struct hci_dev *hdev)2156 static bool enable_service_cache(struct hci_dev *hdev)
2157 {
2158 if (!hdev_is_powered(hdev))
2159 return false;
2160
2161 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2162 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2163 CACHE_TIMEOUT);
2164 return true;
2165 }
2166
2167 return false;
2168 }
2169
/* hci_request completion callback for remove_uuid(): finish the
 * pending MGMT_OP_REMOVE_UUID command with the HCI status.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2176
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID — or all of
 * them when the all-zero wildcard UUID is given — and refresh the
 * class of device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID is a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize against other in-flight EIR/class updates */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache was just (re)armed, the
		 * controller update is deferred to the cache timer, so
		 * the command can complete immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry that matches the requested UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: empty request, nothing to send — reply now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred to remove_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2255
/* hci_request completion callback for set_dev_class(): finish the
 * pending MGMT_OP_SET_DEV_CLASS command with the HCI status.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2262
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor class of device.
 * Only supported on BR/EDR capable controllers.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Serialize against other in-flight EIR/class updates */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Not powered: just store the new class; it takes effect later */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* The lock is dropped around the synchronous cancel —
		 * NOTE(review): presumably because the service cache
		 * work item takes hdev->lock itself; confirm before
		 * changing this ordering.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: empty request, nothing to send — reply now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred to set_class_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2333
/* MGMT_OP_LOAD_LINK_KEYS handler: replace all stored BR/EDR link keys
 * with the list supplied by userspace. All keys are validated before
 * the existing ones are cleared.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap so that struct_size() below cannot exceed the u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* debug_keys is a boolean parameter */
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every key type before touching the stored keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	/* Only emit a settings event if HCI_KEEP_DEBUG_KEYS actually
	 * changed state.
	 */
	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys that were administratively blocked */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2423
/* Emit an MGMT_EV_DEVICE_UNPAIRED event for the given address to all
 * mgmt sockets except @skip_sk (usually the command originator).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired evt;

	evt.addr.type = addr_type;
	bacpy(&evt.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &evt, sizeof(evt),
			  skip_sk);
}
2435
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing material for a
 * device (link key for BR/EDR, SMP-stored LTK/IRK for LE) and
 * optionally disconnect it. When a disconnect is performed the reply
 * is deferred until the link teardown completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the requested address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean parameter */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored link key means the device was not paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop stored parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Reply is deferred: addr_cmd_complete runs when the abort
	 * finishes.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2563
/* MGMT_OP_DISCONNECT handler: tear down an existing BR/EDR or LE
 * connection. The reply is deferred (via generic_cmd_complete) until
 * the disconnect actually completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only a single DISCONNECT command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED states mean there is no established link */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2629
/* Map an HCI link type / address type pair onto the mgmt BDADDR_*
 * address encoding used in events and replies.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* Anything that is not an LE link is reported as BR/EDR */
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;

	/* Fallback to LE Random address type */
	return BDADDR_LE_RANDOM;
}
2648
/* MGMT_OP_GET_CONNECTIONS handler: reply with the address list of all
 * mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count mgmt-connected links to size the reply */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries. A SCO/eSCO entry is written
	 * but then skipped without advancing i, so the next eligible
	 * connection simply overwrites it.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2702
/* Queue HCI_OP_PIN_CODE_NEG_REPLY for the given address and register a
 * pending MGMT_OP_PIN_CODE_NEG_REPLY command whose reply is sent from
 * addr_cmd_complete. Caller holds hdev->lock (called from the pin code
 * handlers below).
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* Only the peer address is sent in the negative reply */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2723
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN: reject anything
	 * shorter by sending an automatic negative reply to the
	 * controller and failing the command.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Reply is deferred to addr_cmd_complete */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2785
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability that will
 * be used for subsequent pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Values beyond the highest defined SMP IO capability are invalid */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2808
find_pairing(struct hci_conn *conn)2809 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2810 {
2811 struct hci_dev *hdev = conn->hdev;
2812 struct mgmt_pending_cmd *cmd;
2813
2814 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2815 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2816 continue;
2817
2818 if (cmd->user_data != conn)
2819 continue;
2820
2821 return cmd;
2822 }
2823
2824 return NULL;
2825 }
2826
/* Finish a pending MGMT_OP_PAIR_DEVICE command: send the reply to
 * userspace, detach the pairing callbacks and release the references
 * the command held on the connection.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() taken in pair_device() */
	hci_conn_put(conn);

	return err;
}
2855
/* Called when SMP pairing for @conn finishes; completes the pending
 * PAIR_DEVICE command (if one exists) with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
			       MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
2867
/* BR/EDR connection callback: complete the pending pairing command
 * with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2883
/* LE connection callback: only failures complete the pending pairing
 * command here — a successful LE connection is not yet proof that
 * pairing succeeded (success goes through mgmt_smp_complete()).
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (status == 0)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2902
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device,
 * creating a BR/EDR ACL or LE connection as needed. The reply is sent
 * from pairing_complete() once the connection callbacks fire.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Translate the connect error into a mgmt status */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A connection with callbacks already attached is in use by
	 * another pairing attempt.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Hold a reference for the lifetime of the pending command;
	 * released in pairing_complete().
	 */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secured: complete right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3037
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending PAIR_DEVICE
 * command, remove whatever pairing material was created so far, and
 * drop the link if it was only brought up for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a PAIR_DEVICE command in flight to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Completes the PAIR_DEVICE command (pairing_complete) and frees
	 * it. NOTE(review): conn is still used below — presumably kept
	 * alive by the connection hash; confirm before reordering.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3094
/* Shared worker for the user confirm/passkey/PIN (negative) reply
 * commands. LE responses are handed directly to SMP; BR/EDR responses
 * are sent to the controller as @hci_op, with the mgmt reply deferred
 * until the HCI command completes (addr_cmd_complete).
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP and complete synchronously */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3165
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* No passkey involved in a negative reply */
	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3177
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation
 * request (numeric comparison).
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* This command carries no variable-length fields */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3193
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3205
/* MGMT_OP_USER_PASSKEY_REPLY handler: supply the passkey entered by
 * the user for an ongoing pairing.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3217
user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)3218 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3219 void *data, u16 len)
3220 {
3221 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3222
3223 bt_dev_dbg(hdev, "sock %p", sk);
3224
3225 return user_pairing_resp(sk, hdev, &cp->addr,
3226 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3227 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3228 }
3229
adv_expire(struct hci_dev *hdev, u32 flags)3230 static void adv_expire(struct hci_dev *hdev, u32 flags)
3231 {
3232 struct adv_info *adv_instance;
3233 struct hci_request req;
3234 int err;
3235
3236 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3237 if (!adv_instance)
3238 return;
3239
3240 /* stop if current instance doesn't need to be changed */
3241 if (!(adv_instance->flags & flags))
3242 return;
3243
3244 cancel_adv_timeout(hdev);
3245
3246 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3247 if (!adv_instance)
3248 return;
3249
3250 hci_req_init(&req, hdev);
3251 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3252 true);
3253 if (err)
3254 return;
3255
3256 hci_req_run(&req, NULL);
3257 }
3258
set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)3259 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3260 {
3261 struct mgmt_cp_set_local_name *cp;
3262 struct mgmt_pending_cmd *cmd;
3263
3264 bt_dev_dbg(hdev, "status 0x%02x", status);
3265
3266 hci_dev_lock(hdev);
3267
3268 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3269 if (!cmd)
3270 goto unlock;
3271
3272 cp = cmd->param;
3273
3274 if (status) {
3275 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3276 mgmt_status(status));
3277 } else {
3278 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3279 cp, sizeof(*cp));
3280
3281 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3282 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3283 }
3284
3285 mgmt_pending_remove(cmd);
3286
3287 unlock:
3288 hci_dev_unlock(hdev);
3289 }
3290
set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)3291 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3292 u16 len)
3293 {
3294 struct mgmt_cp_set_local_name *cp = data;
3295 struct mgmt_pending_cmd *cmd;
3296 struct hci_request req;
3297 int err;
3298
3299 bt_dev_dbg(hdev, "sock %p", sk);
3300
3301 hci_dev_lock(hdev);
3302
3303 /* If the old values are the same as the new ones just return a
3304 * direct command complete event.
3305 */
3306 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3307 !memcmp(hdev->short_name, cp->short_name,
3308 sizeof(hdev->short_name))) {
3309 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3310 data, len);
3311 goto failed;
3312 }
3313
3314 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3315
3316 if (!hdev_is_powered(hdev)) {
3317 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3318
3319 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3320 data, len);
3321 if (err < 0)
3322 goto failed;
3323
3324 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3325 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3326 ext_info_changed(hdev, sk);
3327
3328 goto failed;
3329 }
3330
3331 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3332 if (!cmd) {
3333 err = -ENOMEM;
3334 goto failed;
3335 }
3336
3337 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3338
3339 hci_req_init(&req, hdev);
3340
3341 if (lmp_bredr_capable(hdev)) {
3342 __hci_req_update_name(&req);
3343 __hci_req_update_eir(&req);
3344 }
3345
3346 /* The name is stored in the scan response data and so
3347 * no need to udpate the advertising data here.
3348 */
3349 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3350 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3351
3352 err = hci_req_run(&req, set_name_complete);
3353 if (err < 0)
3354 mgmt_pending_remove(cmd);
3355
3356 failed:
3357 hci_dev_unlock(hdev);
3358 return err;
3359 }
3360
set_appearance(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)3361 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3362 u16 len)
3363 {
3364 struct mgmt_cp_set_appearance *cp = data;
3365 u16 appearance;
3366 int err;
3367
3368 bt_dev_dbg(hdev, "sock %p", sk);
3369
3370 if (!lmp_le_capable(hdev))
3371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3372 MGMT_STATUS_NOT_SUPPORTED);
3373
3374 appearance = le16_to_cpu(cp->appearance);
3375
3376 hci_dev_lock(hdev);
3377
3378 if (hdev->appearance != appearance) {
3379 hdev->appearance = appearance;
3380
3381 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3382 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3383
3384 ext_info_changed(hdev, sk);
3385 }
3386
3387 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3388 0);
3389
3390 hci_dev_unlock(hdev);
3391
3392 return err;
3393 }
3394
get_phy_configuration(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)3395 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3396 void *data, u16 len)
3397 {
3398 struct mgmt_rp_get_phy_confguration rp;
3399
3400 bt_dev_dbg(hdev, "sock %p", sk);
3401
3402 hci_dev_lock(hdev);
3403
3404 memset(&rp, 0, sizeof(rp));
3405
3406 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3407 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3408 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3409
3410 hci_dev_unlock(hdev);
3411
3412 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3413 &rp, sizeof(rp));
3414 }
3415
mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)3416 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3417 {
3418 struct mgmt_ev_phy_configuration_changed ev;
3419
3420 memset(&ev, 0, sizeof(ev));
3421
3422 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3423
3424 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3425 sizeof(ev), skip);
3426 }
3427
set_default_phy_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb)3428 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3429 u16 opcode, struct sk_buff *skb)
3430 {
3431 struct mgmt_pending_cmd *cmd;
3432
3433 bt_dev_dbg(hdev, "status 0x%02x", status);
3434
3435 hci_dev_lock(hdev);
3436
3437 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3438 if (!cmd)
3439 goto unlock;
3440
3441 if (status) {
3442 mgmt_cmd_status(cmd->sk, hdev->id,
3443 MGMT_OP_SET_PHY_CONFIGURATION,
3444 mgmt_status(status));
3445 } else {
3446 mgmt_cmd_complete(cmd->sk, hdev->id,
3447 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3448 NULL, 0);
3449
3450 mgmt_phy_configuration_changed(hdev, cmd->sk);
3451 }
3452
3453 mgmt_pending_remove(cmd);
3454
3455 unlock:
3456 hci_dev_unlock(hdev);
3457 }
3458
set_phy_configuration(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)3459 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3460 void *data, u16 len)
3461 {
3462 struct mgmt_cp_set_phy_confguration *cp = data;
3463 struct hci_cp_le_set_default_phy cp_phy;
3464 struct mgmt_pending_cmd *cmd;
3465 struct hci_request req;
3466 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3467 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3468 bool changed = false;
3469 int err;
3470
3471 bt_dev_dbg(hdev, "sock %p", sk);
3472
3473 configurable_phys = get_configurable_phys(hdev);
3474 supported_phys = get_supported_phys(hdev);
3475 selected_phys = __le32_to_cpu(cp->selected_phys);
3476
3477 if (selected_phys & ~supported_phys)
3478 return mgmt_cmd_status(sk, hdev->id,
3479 MGMT_OP_SET_PHY_CONFIGURATION,
3480 MGMT_STATUS_INVALID_PARAMS);
3481
3482 unconfigure_phys = supported_phys & ~configurable_phys;
3483
3484 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3485 return mgmt_cmd_status(sk, hdev->id,
3486 MGMT_OP_SET_PHY_CONFIGURATION,
3487 MGMT_STATUS_INVALID_PARAMS);
3488
3489 if (selected_phys == get_selected_phys(hdev))
3490 return mgmt_cmd_complete(sk, hdev->id,
3491 MGMT_OP_SET_PHY_CONFIGURATION,
3492 0, NULL, 0);
3493
3494 hci_dev_lock(hdev);
3495
3496 if (!hdev_is_powered(hdev)) {
3497 err = mgmt_cmd_status(sk, hdev->id,
3498 MGMT_OP_SET_PHY_CONFIGURATION,
3499 MGMT_STATUS_REJECTED);
3500 goto unlock;
3501 }
3502
3503 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3504 err = mgmt_cmd_status(sk, hdev->id,
3505 MGMT_OP_SET_PHY_CONFIGURATION,
3506 MGMT_STATUS_BUSY);
3507 goto unlock;
3508 }
3509
3510 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3511 pkt_type |= (HCI_DH3 | HCI_DM3);
3512 else
3513 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3514
3515 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3516 pkt_type |= (HCI_DH5 | HCI_DM5);
3517 else
3518 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3519
3520 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3521 pkt_type &= ~HCI_2DH1;
3522 else
3523 pkt_type |= HCI_2DH1;
3524
3525 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3526 pkt_type &= ~HCI_2DH3;
3527 else
3528 pkt_type |= HCI_2DH3;
3529
3530 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3531 pkt_type &= ~HCI_2DH5;
3532 else
3533 pkt_type |= HCI_2DH5;
3534
3535 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3536 pkt_type &= ~HCI_3DH1;
3537 else
3538 pkt_type |= HCI_3DH1;
3539
3540 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3541 pkt_type &= ~HCI_3DH3;
3542 else
3543 pkt_type |= HCI_3DH3;
3544
3545 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3546 pkt_type &= ~HCI_3DH5;
3547 else
3548 pkt_type |= HCI_3DH5;
3549
3550 if (pkt_type != hdev->pkt_type) {
3551 hdev->pkt_type = pkt_type;
3552 changed = true;
3553 }
3554
3555 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3556 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3557 if (changed)
3558 mgmt_phy_configuration_changed(hdev, sk);
3559
3560 err = mgmt_cmd_complete(sk, hdev->id,
3561 MGMT_OP_SET_PHY_CONFIGURATION,
3562 0, NULL, 0);
3563
3564 goto unlock;
3565 }
3566
3567 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3568 len);
3569 if (!cmd) {
3570 err = -ENOMEM;
3571 goto unlock;
3572 }
3573
3574 hci_req_init(&req, hdev);
3575
3576 memset(&cp_phy, 0, sizeof(cp_phy));
3577
3578 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3579 cp_phy.all_phys |= 0x01;
3580
3581 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3582 cp_phy.all_phys |= 0x02;
3583
3584 if (selected_phys & MGMT_PHY_LE_1M_TX)
3585 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3586
3587 if (selected_phys & MGMT_PHY_LE_2M_TX)
3588 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3589
3590 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3591 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3592
3593 if (selected_phys & MGMT_PHY_LE_1M_RX)
3594 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3595
3596 if (selected_phys & MGMT_PHY_LE_2M_RX)
3597 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3598
3599 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3600 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3601
3602 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3603
3604 err = hci_req_run_skb(&req, set_default_phy_complete);
3605 if (err < 0)
3606 mgmt_pending_remove(cmd);
3607
3608 unlock:
3609 hci_dev_unlock(hdev);
3610
3611 return err;
3612 }
3613
set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)3614 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3615 u16 len)
3616 {
3617 int err = MGMT_STATUS_SUCCESS;
3618 struct mgmt_cp_set_blocked_keys *keys = data;
3619 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3620 sizeof(struct mgmt_blocked_key_info));
3621 u16 key_count, expected_len;
3622 int i;
3623
3624 bt_dev_dbg(hdev, "sock %p", sk);
3625
3626 key_count = __le16_to_cpu(keys->key_count);
3627 if (key_count > max_key_count) {
3628 bt_dev_err(hdev, "too big key_count value %u", key_count);
3629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3630 MGMT_STATUS_INVALID_PARAMS);
3631 }
3632
3633 expected_len = struct_size(keys, keys, key_count);
3634 if (expected_len != len) {
3635 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3636 expected_len, len);
3637 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3638 MGMT_STATUS_INVALID_PARAMS);
3639 }
3640
3641 hci_dev_lock(hdev);
3642
3643 hci_blocked_keys_clear(hdev);
3644
3645 for (i = 0; i < keys->key_count; ++i) {
3646 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3647
3648 if (!b) {
3649 err = MGMT_STATUS_NO_RESOURCES;
3650 break;
3651 }
3652
3653 b->type = keys->keys[i].type;
3654 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3655 list_add_rcu(&b->list, &hdev->blocked_keys);
3656 }
3657 hci_dev_unlock(hdev);
3658
3659 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3660 err, NULL, 0);
3661 }
3662
set_wideband_speech(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)3663 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3664 void *data, u16 len)
3665 {
3666 struct mgmt_mode *cp = data;
3667 int err;
3668 bool changed = false;
3669
3670 bt_dev_dbg(hdev, "sock %p", sk);
3671
3672 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3673 return mgmt_cmd_status(sk, hdev->id,
3674 MGMT_OP_SET_WIDEBAND_SPEECH,
3675 MGMT_STATUS_NOT_SUPPORTED);
3676
3677 if (cp->val != 0x00 && cp->val != 0x01)
3678 return mgmt_cmd_status(sk, hdev->id,
3679 MGMT_OP_SET_WIDEBAND_SPEECH,
3680 MGMT_STATUS_INVALID_PARAMS);
3681
3682 hci_dev_lock(hdev);
3683
3684 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3685 err = mgmt_cmd_status(sk, hdev->id,
3686 MGMT_OP_SET_WIDEBAND_SPEECH,
3687 MGMT_STATUS_BUSY);
3688 goto unlock;
3689 }
3690
3691 if (hdev_is_powered(hdev) &&
3692 !!cp->val != hci_dev_test_flag(hdev,
3693 HCI_WIDEBAND_SPEECH_ENABLED)) {
3694 err = mgmt_cmd_status(sk, hdev->id,
3695 MGMT_OP_SET_WIDEBAND_SPEECH,
3696 MGMT_STATUS_REJECTED);
3697 goto unlock;
3698 }
3699
3700 if (cp->val)
3701 changed = !hci_dev_test_and_set_flag(hdev,
3702 HCI_WIDEBAND_SPEECH_ENABLED);
3703 else
3704 changed = hci_dev_test_and_clear_flag(hdev,
3705 HCI_WIDEBAND_SPEECH_ENABLED);
3706
3707 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3708 if (err < 0)
3709 goto unlock;
3710
3711 if (changed)
3712 err = new_settings(hdev, sk);
3713
3714 unlock:
3715 hci_dev_unlock(hdev);
3716 return err;
3717 }
3718
read_security_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len)3719 static int read_security_info(struct sock *sk, struct hci_dev *hdev,
3720 void *data, u16 data_len)
3721 {
3722 char buf[16];
3723 struct mgmt_rp_read_security_info *rp = (void *)buf;
3724 u16 sec_len = 0;
3725 u8 flags = 0;
3726
3727 bt_dev_dbg(hdev, "sock %p", sk);
3728
3729 memset(&buf, 0, sizeof(buf));
3730
3731 hci_dev_lock(hdev);
3732
3733 /* When the Read Simple Pairing Options command is supported, then
3734 * the remote public key validation is supported.
3735 */
3736 if (hdev->commands[41] & 0x08)
3737 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3738
3739 flags |= 0x02; /* Remote public key validation (LE) */
3740
3741 /* When the Read Encryption Key Size command is supported, then the
3742 * encryption key size is enforced.
3743 */
3744 if (hdev->commands[20] & 0x10)
3745 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3746
3747 flags |= 0x08; /* Encryption key size enforcement (LE) */
3748
3749 sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);
3750
3751 /* When the Read Simple Pairing Options command is supported, then
3752 * also max encryption key size information is provided.
3753 */
3754 if (hdev->commands[41] & 0x08)
3755 sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
3756 hdev->max_enc_key_size);
3757
3758 sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);
3759
3760 rp->sec_len = cpu_to_le16(sec_len);
3761
3762 hci_dev_unlock(hdev);
3763
3764 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
3765 rp, sizeof(*rp) + sec_len);
3766 }
3767
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c
 * UUID of the "debug" experimental feature; bytes stored in reverse
 * (little-endian) order relative to the textual form above.
 */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 671b10b5-42c0-4696-9227-eb28d1b049d6
 * Simultaneous central/peripheral experimental feature; byte-reversed
 * as above.
 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004
 * LL privacy (RPA resolution) experimental feature; byte-reversed as
 * above.
 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3787
read_exp_features_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len)3788 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3789 void *data, u16 data_len)
3790 {
3791 char buf[62]; /* Enough space for 3 features */
3792 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3793 u16 idx = 0;
3794 u32 flags;
3795
3796 bt_dev_dbg(hdev, "sock %p", sk);
3797
3798 memset(&buf, 0, sizeof(buf));
3799
3800 #ifdef CONFIG_BT_FEATURE_DEBUG
3801 if (!hdev) {
3802 flags = bt_dbg_get() ? BIT(0) : 0;
3803
3804 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3805 rp->features[idx].flags = cpu_to_le32(flags);
3806 idx++;
3807 }
3808 #endif
3809
3810 if (hdev) {
3811 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3812 (hdev->le_states[4] & 0x08) && /* Central */
3813 (hdev->le_states[4] & 0x40) && /* Peripheral */
3814 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3815 flags = BIT(0);
3816 else
3817 flags = 0;
3818
3819 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3820 rp->features[idx].flags = cpu_to_le32(flags);
3821 idx++;
3822 }
3823
3824 if (hdev && use_ll_privacy(hdev)) {
3825 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3826 flags = BIT(0) | BIT(1);
3827 else
3828 flags = BIT(1);
3829
3830 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3831 rp->features[idx].flags = cpu_to_le32(flags);
3832 idx++;
3833 }
3834
3835 rp->feature_count = cpu_to_le16(idx);
3836
3837 /* After reading the experimental features information, enable
3838 * the events to update client on any future change.
3839 */
3840 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3841
3842 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3843 MGMT_OP_READ_EXP_FEATURES_INFO,
3844 0, rp, sizeof(*rp) + (20 * idx));
3845 }
3846
exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev, struct sock *skip)3847 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3848 struct sock *skip)
3849 {
3850 struct mgmt_ev_exp_feature_changed ev;
3851
3852 memset(&ev, 0, sizeof(ev));
3853 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3854 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3855
3856 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3857 &ev, sizeof(ev),
3858 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3859
3860 }
3861
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Notify subscribed sockets (except @skip) that the debug experimental
 * feature was toggled.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed evt;

	memset(&evt, 0, sizeof(evt));
	memcpy(evt.uuid, debug_uuid, 16);
	evt.flags = enabled ? cpu_to_le32(BIT(0)) : cpu_to_le32(0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &evt, sizeof(evt),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3876
set_exp_feature(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len)3877 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3878 void *data, u16 data_len)
3879 {
3880 struct mgmt_cp_set_exp_feature *cp = data;
3881 struct mgmt_rp_set_exp_feature rp;
3882
3883 bt_dev_dbg(hdev, "sock %p", sk);
3884
3885 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3886 memset(rp.uuid, 0, 16);
3887 rp.flags = cpu_to_le32(0);
3888
3889 #ifdef CONFIG_BT_FEATURE_DEBUG
3890 if (!hdev) {
3891 bool changed = bt_dbg_get();
3892
3893 bt_dbg_set(false);
3894
3895 if (changed)
3896 exp_debug_feature_changed(false, sk);
3897 }
3898 #endif
3899
3900 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3901 bool changed = hci_dev_test_flag(hdev,
3902 HCI_ENABLE_LL_PRIVACY);
3903
3904 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3905
3906 if (changed)
3907 exp_ll_privacy_feature_changed(false, hdev, sk);
3908 }
3909
3910 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3911
3912 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3913 MGMT_OP_SET_EXP_FEATURE, 0,
3914 &rp, sizeof(rp));
3915 }
3916
3917 #ifdef CONFIG_BT_FEATURE_DEBUG
3918 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3919 bool val, changed;
3920 int err;
3921
3922 /* Command requires to use the non-controller index */
3923 if (hdev)
3924 return mgmt_cmd_status(sk, hdev->id,
3925 MGMT_OP_SET_EXP_FEATURE,
3926 MGMT_STATUS_INVALID_INDEX);
3927
3928 /* Parameters are limited to a single octet */
3929 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3930 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3931 MGMT_OP_SET_EXP_FEATURE,
3932 MGMT_STATUS_INVALID_PARAMS);
3933
3934 /* Only boolean on/off is supported */
3935 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3936 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3937 MGMT_OP_SET_EXP_FEATURE,
3938 MGMT_STATUS_INVALID_PARAMS);
3939
3940 val = !!cp->param[0];
3941 changed = val ? !bt_dbg_get() : bt_dbg_get();
3942 bt_dbg_set(val);
3943
3944 memcpy(rp.uuid, debug_uuid, 16);
3945 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3946
3947 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3948
3949 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3950 MGMT_OP_SET_EXP_FEATURE, 0,
3951 &rp, sizeof(rp));
3952
3953 if (changed)
3954 exp_debug_feature_changed(val, sk);
3955
3956 return err;
3957 }
3958 #endif
3959
3960 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3961 bool val, changed;
3962 int err;
3963 u32 flags;
3964
3965 /* Command requires to use the controller index */
3966 if (!hdev)
3967 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3968 MGMT_OP_SET_EXP_FEATURE,
3969 MGMT_STATUS_INVALID_INDEX);
3970
3971 /* Changes can only be made when controller is powered down */
3972 if (hdev_is_powered(hdev))
3973 return mgmt_cmd_status(sk, hdev->id,
3974 MGMT_OP_SET_EXP_FEATURE,
3975 MGMT_STATUS_NOT_POWERED);
3976
3977 /* Parameters are limited to a single octet */
3978 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3979 return mgmt_cmd_status(sk, hdev->id,
3980 MGMT_OP_SET_EXP_FEATURE,
3981 MGMT_STATUS_INVALID_PARAMS);
3982
3983 /* Only boolean on/off is supported */
3984 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3985 return mgmt_cmd_status(sk, hdev->id,
3986 MGMT_OP_SET_EXP_FEATURE,
3987 MGMT_STATUS_INVALID_PARAMS);
3988
3989 val = !!cp->param[0];
3990
3991 if (val) {
3992 changed = !hci_dev_test_flag(hdev,
3993 HCI_ENABLE_LL_PRIVACY);
3994 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3995 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
3996
3997 /* Enable LL privacy + supported settings changed */
3998 flags = BIT(0) | BIT(1);
3999 } else {
4000 changed = hci_dev_test_flag(hdev,
4001 HCI_ENABLE_LL_PRIVACY);
4002 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4003
4004 /* Disable LL privacy + supported settings changed */
4005 flags = BIT(1);
4006 }
4007
4008 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4009 rp.flags = cpu_to_le32(flags);
4010
4011 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4012
4013 err = mgmt_cmd_complete(sk, hdev->id,
4014 MGMT_OP_SET_EXP_FEATURE, 0,
4015 &rp, sizeof(rp));
4016
4017 if (changed)
4018 exp_ll_privacy_feature_changed(val, hdev, sk);
4019
4020 return err;
4021 }
4022
4023 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4024 MGMT_OP_SET_EXP_FEATURE,
4025 MGMT_STATUS_NOT_SUPPORTED);
4026 }
4027
/* Bitmask covering every per-device flag currently defined. */
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4029
get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len)4030 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4031 u16 data_len)
4032 {
4033 struct mgmt_cp_get_device_flags *cp = data;
4034 struct mgmt_rp_get_device_flags rp;
4035 struct bdaddr_list_with_flags *br_params;
4036 struct hci_conn_params *params;
4037 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4038 u32 current_flags = 0;
4039 u8 status = MGMT_STATUS_INVALID_PARAMS;
4040
4041 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4042 &cp->addr.bdaddr, cp->addr.type);
4043
4044 hci_dev_lock(hdev);
4045
4046 memset(&rp, 0, sizeof(rp));
4047
4048 if (cp->addr.type == BDADDR_BREDR) {
4049 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4050 &cp->addr.bdaddr,
4051 cp->addr.type);
4052 if (!br_params)
4053 goto done;
4054
4055 current_flags = br_params->current_flags;
4056 } else {
4057 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4058 le_addr_type(cp->addr.type));
4059
4060 if (!params)
4061 goto done;
4062
4063 current_flags = params->current_flags;
4064 }
4065
4066 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4067 rp.addr.type = cp->addr.type;
4068 rp.supported_flags = cpu_to_le32(supported_flags);
4069 rp.current_flags = cpu_to_le32(current_flags);
4070
4071 status = MGMT_STATUS_SUCCESS;
4072
4073 done:
4074 hci_dev_unlock(hdev);
4075
4076 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4077 &rp, sizeof(rp));
4078 }
4079
device_flags_changed(struct sock *sk, struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u32 supported_flags, u32 current_flags)4080 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4081 bdaddr_t *bdaddr, u8 bdaddr_type,
4082 u32 supported_flags, u32 current_flags)
4083 {
4084 struct mgmt_ev_device_flags_changed ev;
4085
4086 bacpy(&ev.addr.bdaddr, bdaddr);
4087 ev.addr.type = bdaddr_type;
4088 ev.supported_flags = cpu_to_le32(supported_flags);
4089 ev.current_flags = cpu_to_le32(current_flags);
4090
4091 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4092 }
4093
set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)4094 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4095 u16 len)
4096 {
4097 struct mgmt_cp_set_device_flags *cp = data;
4098 struct bdaddr_list_with_flags *br_params;
4099 struct hci_conn_params *params;
4100 u8 status = MGMT_STATUS_INVALID_PARAMS;
4101 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4102 u32 current_flags = __le32_to_cpu(cp->current_flags);
4103
4104 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4105 &cp->addr.bdaddr, cp->addr.type,
4106 __le32_to_cpu(current_flags));
4107
4108 if ((supported_flags | current_flags) != supported_flags) {
4109 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4110 current_flags, supported_flags);
4111 goto done;
4112 }
4113
4114 hci_dev_lock(hdev);
4115
4116 if (cp->addr.type == BDADDR_BREDR) {
4117 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4118 &cp->addr.bdaddr,
4119 cp->addr.type);
4120
4121 if (br_params) {
4122 br_params->current_flags = current_flags;
4123 status = MGMT_STATUS_SUCCESS;
4124 } else {
4125 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4126 &cp->addr.bdaddr, cp->addr.type);
4127 }
4128 } else {
4129 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4130 le_addr_type(cp->addr.type));
4131 if (params) {
4132 params->current_flags = current_flags;
4133 status = MGMT_STATUS_SUCCESS;
4134 } else {
4135 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4136 &cp->addr.bdaddr,
4137 le_addr_type(cp->addr.type));
4138 }
4139 }
4140
4141 done:
4142 hci_dev_unlock(hdev);
4143
4144 if (status == MGMT_STATUS_SUCCESS)
4145 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4146 supported_flags, current_flags);
4147
4148 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4149 &cp->addr, sizeof(cp->addr));
4150 }
4151
mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev, u16 handle)4152 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4153 u16 handle)
4154 {
4155 struct mgmt_ev_adv_monitor_added ev;
4156
4157 ev.monitor_handle = cpu_to_le16(handle);
4158
4159 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4160 }
4161
mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev, u16 handle)4162 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4163 u16 handle)
4164 {
4165 struct mgmt_ev_adv_monitor_added ev;
4166
4167 ev.monitor_handle = cpu_to_le16(handle);
4168
4169 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
4170 }
4171
read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)4172 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4173 void *data, u16 len)
4174 {
4175 struct adv_monitor *monitor = NULL;
4176 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4177 int handle, err;
4178 size_t rp_size = 0;
4179 __u32 supported = 0;
4180 __u16 num_handles = 0;
4181 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4182
4183 BT_DBG("request for %s", hdev->name);
4184
4185 hci_dev_lock(hdev);
4186
4187 if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
4188 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4189
4190 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
4191 handles[num_handles++] = monitor->handle;
4192 }
4193
4194 hci_dev_unlock(hdev);
4195
4196 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4197 rp = kmalloc(rp_size, GFP_KERNEL);
4198 if (!rp)
4199 return -ENOMEM;
4200
4201 /* Once controller-based monitoring is in place, the enabled_features
4202 * should reflect the use.
4203 */
4204 rp->supported_features = cpu_to_le32(supported);
4205 rp->enabled_features = 0;
4206 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4207 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4208 rp->num_handles = cpu_to_le16(num_handles);
4209 if (num_handles)
4210 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4211
4212 err = mgmt_cmd_complete(sk, hdev->id,
4213 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4214 MGMT_STATUS_SUCCESS, rp, rp_size);
4215
4216 kfree(rp);
4217
4218 return err;
4219 }
4220
add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)4221 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4222 void *data, u16 len)
4223 {
4224 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4225 struct mgmt_rp_add_adv_patterns_monitor rp;
4226 struct adv_monitor *m = NULL;
4227 struct adv_pattern *p = NULL;
4228 unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
4229 __u8 cp_ofst = 0, cp_len = 0;
4230 int err, i;
4231
4232 BT_DBG("request for %s", hdev->name);
4233
4234 if (len <= sizeof(*cp) || cp->pattern_count == 0) {
4235 err = mgmt_cmd_status(sk, hdev->id,
4236 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4237 MGMT_STATUS_INVALID_PARAMS);
4238 goto failed;
4239 }
4240
4241 m = kmalloc(sizeof(*m), GFP_KERNEL);
4242 if (!m) {
4243 err = -ENOMEM;
4244 goto failed;
4245 }
4246
4247 INIT_LIST_HEAD(&m->patterns);
4248 m->active = false;
4249
4250 for (i = 0; i < cp->pattern_count; i++) {
4251 if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
4252 err = mgmt_cmd_status(sk, hdev->id,
4253 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4254 MGMT_STATUS_INVALID_PARAMS);
4255 goto failed;
4256 }
4257
4258 cp_ofst = cp->patterns[i].offset;
4259 cp_len = cp->patterns[i].length;
4260 if (cp_ofst >= HCI_MAX_AD_LENGTH ||
4261 cp_len > HCI_MAX_AD_LENGTH ||
4262 (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
4263 err = mgmt_cmd_status(sk, hdev->id,
4264 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4265 MGMT_STATUS_INVALID_PARAMS);
4266 goto failed;
4267 }
4268
4269 p = kmalloc(sizeof(*p), GFP_KERNEL);
4270 if (!p) {
4271 err = -ENOMEM;
4272 goto failed;
4273 }
4274
4275 p->ad_type = cp->patterns[i].ad_type;
4276 p->offset = cp->patterns[i].offset;
4277 p->length = cp->patterns[i].length;
4278 memcpy(p->value, cp->patterns[i].value, p->length);
4279
4280 INIT_LIST_HEAD(&p->list);
4281 list_add(&p->list, &m->patterns);
4282 }
4283
4284 if (mp_cnt != cp->pattern_count) {
4285 err = mgmt_cmd_status(sk, hdev->id,
4286 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4287 MGMT_STATUS_INVALID_PARAMS);
4288 goto failed;
4289 }
4290
4291 hci_dev_lock(hdev);
4292
4293 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4294
4295 err = hci_add_adv_monitor(hdev, m);
4296 if (err) {
4297 if (err == -ENOSPC) {
4298 mgmt_cmd_status(sk, hdev->id,
4299 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4300 MGMT_STATUS_NO_RESOURCES);
4301 }
4302 goto unlock;
4303 }
4304
4305 if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
4306 mgmt_adv_monitor_added(sk, hdev, m->handle);
4307
4308 hci_dev_unlock(hdev);
4309
4310 rp.monitor_handle = cpu_to_le16(m->handle);
4311
4312 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4313 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4314
4315 unlock:
4316 hci_dev_unlock(hdev);
4317
4318 failed:
4319 hci_free_adv_monitor(m);
4320 return err;
4321 }
4322
/* Handle MGMT_OP_REMOVE_ADV_MONITOR: drop the monitor identified by
 * the supplied handle and echo the handle back on success.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	unsigned int old_monitor_cnt;
	u16 monitor_handle;
	int err;

	BT_DBG("request for %s", hdev->name);

	monitor_handle = __le16_to_cpu(cp->monitor_handle);

	hci_dev_lock(hdev);

	old_monitor_cnt = hdev->adv_monitors_cnt;

	err = hci_remove_adv_monitor(hdev, monitor_handle);
	if (err == -ENOENT) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				      MGMT_STATUS_INVALID_INDEX);
		goto unlock;
	}

	/* Emit the removed event only if a monitor actually went away */
	if (hdev->adv_monitors_cnt < old_monitor_cnt)
		mgmt_adv_monitor_removed(sk, hdev, monitor_handle);

	hci_dev_unlock(hdev);

	rp.monitor_handle = cp->monitor_handle;

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4360
/* HCI completion callback for the local OOB data read issued by
 * read_local_oob_data().  Translates the controller reply (legacy or
 * extended format, selected by the completed opcode) into the mgmt
 * response and finishes the pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	/* Nothing to do if the command was already completed or cancelled */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* The legacy reply carries no P-256 values, so trim
		 * them from the mgmt response.
		 */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a short controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4419
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA by issuing the matching HCI read
 * command; the mgmt command is completed from the HCI callback.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *pending;
	struct hci_request req;
	u16 hci_opcode;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one local OOB read may be in flight at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	pending = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev,
				   NULL, 0);
	if (!pending) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Secure Connections capable controllers report the extended
	 * (P-192 + P-256) OOB values.
	 */
	hci_opcode = bredr_sc_enabled(hdev) ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
					      HCI_OP_READ_LOCAL_OOB_DATA;

	hci_req_init(&req, hdev);
	hci_req_add(&req, hci_opcode, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(pending);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4470
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA.
 *
 * Accepts either the legacy payload (P-192 hash/randomizer only) or
 * the extended payload (P-192 and P-256 values), distinguished purely
 * by the command length.  All-zero key values mean "not provided".
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy payload is only accepted for BR/EDR */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither defined payload size matched */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4578
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored OOB data for
 * one BR/EDR address, or all of it when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Remote OOB data is only tracked for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Wildcard address clears every stored entry */
		hci_remote_oob_data_clear(hdev);
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
4615
/* Finish whichever start-discovery variant is pending once the HCI
 * side has reported the outcome of starting discovery.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 start_discovery_ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	unsigned int i;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* At most one of the start-discovery variants can be pending */
	for (i = 0; !cmd && i < ARRAY_SIZE(start_discovery_ops); i++)
		cmd = pending_find(start_discovery_ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Wake a suspend transition waiting for discovery to resume */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4645
discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, uint8_t *mgmt_status)4646 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4647 uint8_t *mgmt_status)
4648 {
4649 switch (type) {
4650 case DISCOV_TYPE_LE:
4651 *mgmt_status = mgmt_le_support(hdev);
4652 if (*mgmt_status)
4653 return false;
4654 break;
4655 case DISCOV_TYPE_INTERLEAVED:
4656 *mgmt_status = mgmt_le_support(hdev);
4657 if (*mgmt_status)
4658 return false;
4659 fallthrough;
4660 case DISCOV_TYPE_BREDR:
4661 *mgmt_status = mgmt_bredr_support(hdev);
4662 if (*mgmt_status)
4663 return false;
4664 break;
4665 default:
4666 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4667 return false;
4668 }
4669
4670 return true;
4671 }
4672
/* Common implementation behind the Start Discovery and Start Limited
 * Discovery handlers.  Only validates state and queues the work; the
 * actual scanning starts asynchronously on the request workqueue and
 * the command completes from mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session may run at a time, and periodic
	 * inquiry is mutually exclusive with it.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	/* Limited discovery restricts results to limited-discoverable
	 * devices; the flag is picked up by the discovery work.
	 */
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4740
/* Handler for MGMT_OP_START_DISCOVERY (regular, non-limited discovery) */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4747
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4755
/* Completion helper for Start Service Discovery: the response carries
 * only the first byte of the stored command parameters, which is the
 * discovery type.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4762
/* Handle MGMT_OP_START_SERVICE_DISCOVERY.
 *
 * Like regular discovery, but installs a result filter (RSSI threshold
 * plus an optional list of 128-bit UUIDs) before kicking off the scan
 * from the request workqueue.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count such that sizeof(*cp) + count * 16
	 * cannot overflow the u16 length arithmetic below.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session may run at a time, and periodic
	 * inquiry is mutually exclusive with it.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must exactly match the declared UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Freed later via hci_discovery_filter_clear() */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Discovery starts asynchronously; the command completes from
	 * mgmt_start_discovery_complete().
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4863
/* Finish a pending Stop Discovery command once the HCI side has
 * reported the outcome of stopping discovery.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *pending;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	pending = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	}

	hci_dev_unlock(hdev);

	/* Wake a suspend transition waiting for discovery to stop */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4886
/* Handle MGMT_OP_STOP_DISCOVERY: request that a running discovery
 * session of the given type be shut down.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *cp = data;
	struct mgmt_pending_cmd *pending;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Stopping is only valid while discovery is actually running */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &cp->type,
					sizeof(cp->type));
		goto unlock;
	}

	/* The requested type must match the session in progress */
	if (hdev->discovery.type != cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->type, sizeof(cp->type));
		goto unlock;
	}

	pending = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!pending) {
		err = -ENOMEM;
		goto unlock;
	}

	pending->cmd_complete = generic_cmd_complete;

	/* The actual stop runs on the request workqueue; the command
	 * completes from mgmt_stop_discovery_complete().
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4928
/* Handle MGMT_OP_CONFIRM_NAME: tell the kernel whether user space
 * already knows the remote name for an inquiry-cache entry, so that
 * name resolution can be skipped or scheduled accordingly.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *entry;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense during active discovery */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	entry = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!entry) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed */
		entry->name_state = NAME_KNOWN;
		list_del(&entry->list);
	} else {
		/* Queue the entry for name resolution */
		entry->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, entry);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4970
/* Handle MGMT_OP_BLOCK_DEVICE: add the address to the reject list so
 * connections from it are refused.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status = MGMT_STATUS_FAILED;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err >= 0) {
		/* Notify other mgmt sockets about the new block entry */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5006
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the address from the reject
 * list again.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err >= 0) {
		/* Notify other mgmt sockets about the removed entry */
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5042
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID record and refresh
 * the EIR data so the new record becomes visible.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	__u16 source;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are defined */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the updated Device ID record into the EIR data */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5077
/* HCI request callback used when re-enabling an advertising instance;
 * only logs the status, no further action is taken.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
5083
/* HCI completion callback for Set Advertising.
 *
 * Syncs the HCI_ADVERTISING flag with the controller state, answers
 * all pending Set Advertising commands, emits New Settings, and — if
 * the global setting was just turned off while advertising instances
 * exist — re-schedules multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state into the flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Wake a suspend/resume transition waiting for advertising to
	 * be paused or unpaused.
	 */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* Fall back to the first registered instance when no current
	 * instance is set.
	 */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5157
/* Handle MGMT_OP_SET_ADVERTISING.
 *
 * val 0x00 disables advertising, 0x01 enables it, 0x02 enables
 * connectable advertising.  Depending on device state this either
 * just toggles flags and replies directly, or builds an HCI request
 * that is completed from set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is paused during a suspend transition; reject
	 * changes until it is resumed.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag really flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Don't race with another in-flight advertising or LE toggle */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5276
/* Handle MGMT_OP_SET_STATIC_ADDRESS: configure (or clear) the LE
 * static random address while the controller is powered off.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The static address may only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY disables the static address; any other value has
	 * to be a valid static random address.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err >= 0)
		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
	return err;
}
5320
/* Handle MGMT_OP_SET_SCAN_PARAMS: store new LE scan interval/window
 * and restart a running background scan so they take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Interval and window are both limited to 0x0004-0x4000, and
	 * the window may never exceed the interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 || window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req, false);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5377
/* Completion callback for the Set Fast Connectable HCI request. Looks up
 * the pending management command and either reports the failure status or
 * commits the new mode and notifies user space.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (!status) {
		struct mgmt_mode *cp = cmd->param;

		/* Mirror the requested mode in the device flags before
		 * sending the settings response and New Settings event.
		 */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5411
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE. Only valid for BR/EDR capable
 * controllers of version 1.2 or later. When the controller is powered the
 * page scan parameters are changed via an HCI request and the command
 * completes asynchronously in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only a plain on/off value is accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable operation may be in flight */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested mode already active: just confirm current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off no HCI traffic is possible; just toggle the
	 * flag and report the new settings. The controller will be
	 * configured accordingly on the next power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	/* On success the command is completed from the request callback;
	 * on failure to even queue the request, fail it right here.
	 */
	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5476
/* Completion callback for the Set BR/EDR HCI request. On failure the
 * HCI_BREDR_ENABLED flag (set optimistically in set_bredr()) is rolled
 * back; on success the settings response and New Settings event are sent.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5508
/* Handler for MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode controller. Disabling while powered is rejected, and
 * re-enabling is refused when the configuration (static address or secure
 * connections) would become invalid.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* This toggle only makes sense on dual-mode (BR/EDR + LE)
	 * controllers.
	 */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; turning BR/EDR on/off with LE disabled
	 * is not a supported configuration.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only the flags need to be updated; BR/EDR
	 * dependent settings are cleared when disabling.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 * set_bredr_complete() rolls it back if the request fails.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5620
/* Completion callback for the Write Secure Connections Support request.
 * Translates the requested value (0x00 off, 0x01 on, 0x02 SC-only mode)
 * into the HCI_SC_ENABLED/HCI_SC_ONLY flag pair and notifies user space.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	/* On failure only report the error; the flags are untouched */
	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5665
/* Handler for MGMT_OP_SET_SECURE_CONN. Value 0x00 disables, 0x01 enables
 * and 0x02 enables SC-only mode. When the controller is powered and SC
 * capable with BR/EDR enabled, the change is written to the controller;
 * otherwise only the host flags are updated.
 *
 * Note: the "failed" label is also the normal exit path; it only unlocks
 * and returns whatever err holds.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC is usable either via the controller (BR/EDR) or purely in
	 * the host stack for LE; at least one must be available.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, SC builds on SSP, so SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without a powered, SC-capable, BR/EDR-enabled controller only
	 * the host flags need updating; no HCI command is sent.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do when both the enabled and SC-only state already
	 * match the request.
	 */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Flags are updated in sc_enable_complete() once the controller
	 * has accepted the new setting.
	 */
	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5753
/* Handler for MGMT_OP_SET_DEBUG_KEYS. Value 0x00 discards debug keys,
 * 0x01 keeps them, and 0x02 additionally makes the controller generate
 * debug keys itself (SSP debug mode).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool keep_changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* cp->val is a u8, so this accepts exactly 0x00, 0x01 and 0x02 */
	if (cp->val > 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are kept around */
	if (cp->val)
		keep_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_KEEP_DEBUG_KEYS);
	else
		keep_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 turns on active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Sync the controller's SSP debug mode when powered with SSP on */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (keep_changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5800
/* Handler for MGMT_OP_SET_PRIVACY. Installs or clears the local IRK and
 * the privacy flags. Only allowed while the controller is powered off so
 * the address generation state cannot change underneath active traffic.
 * cp->privacy: 0x00 off, 0x01 full privacy, 0x02 limited privacy.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is disabled */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5857
irk_is_valid(struct mgmt_irk_info *irk)5858 static bool irk_is_valid(struct mgmt_irk_info *irk)
5859 {
5860 switch (irk->addr.type) {
5861 case BDADDR_LE_PUBLIC:
5862 return true;
5863
5864 case BDADDR_LE_RANDOM:
5865 /* Two most significant bits shall be set */
5866 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5867 return false;
5868 return true;
5869 }
5870
5871 return false;
5872 }
5873
/* Handler for MGMT_OP_LOAD_IRKS. Validates the variable-length list of
 * Identity Resolving Keys, then atomically (under hdev->lock) replaces the
 * stored IRK list with the provided one, skipping blocked keys.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Largest irk_count that still fits a u16-sized command payload */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared number of entries */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Reject the whole load if any single entry is invalid */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space providing IRKs implies it can resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5949
ltk_is_valid(struct mgmt_ltk_info *key)5950 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5951 {
5952 if (key->initiator != 0x00 && key->initiator != 0x01)
5953 return false;
5954
5955 switch (key->addr.type) {
5956 case BDADDR_LE_PUBLIC:
5957 return true;
5958
5959 case BDADDR_LE_RANDOM:
5960 /* Two most significant bits shall be set */
5961 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5962 return false;
5963 return true;
5964 }
5965
5966 return false;
5967 }
5968
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS. Validates the key list, then
 * atomically (under hdev->lock) replaces the stored LTKs. Blocked keys,
 * debug keys and keys of unknown type are silently skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest key_count that still fits a u16-sized command payload */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared number of entries */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Reject the whole load if any single entry is invalid */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are deliberately not stored: the
			 * fallthrough hits the default's continue, skipping
			 * this entry (like any unknown key type).
			 */
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6069
/* Send the Get Connection Information reply for a pending command and
 * release the connection references taken when the command was queued.
 * On failure the invalid marker values are reported so user space does
 * not mistake them for real readings.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	/* Default to the invalid markers, overwritten on success */
	rp.rssi = HCI_RSSI_INVALID;
	rp.tx_power = HCI_TX_POWER_INVALID;
	rp.max_tx_power = HCI_TX_POWER_INVALID;

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the hold and reference taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
6096
/* Completion callback for the connection-information refresh request
 * issued by get_conn_info(). Recovers the connection handle from the last
 * sent command and completes the matching pending management command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command matched the last sent one: nothing to complete */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command is keyed by the connection it refers to */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6150
/* Handler for MGMT_OP_GET_CONN_INFO. Replies with cached RSSI/TX power
 * values when they are recent enough; otherwise issues Read RSSI (and,
 * where needed, Read Transmit Power Level) commands and completes the
 * request asynchronously via conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always the first command in the request;
		 * conn_info_refresh_complete() relies on this ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References are released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6271
/* Send the Get Clock Information reply for a pending command. On failure
 * the clock fields stay zeroed; otherwise the local clock and, when a
 * connection is associated, its piconet clock and accuracy are filled in.
 * Any connection references taken at queue time are released.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	/* On error the reply carries only the address */
	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Drop the hold and reference taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6307
/* Completion callback for the Read Clock request issued by
 * get_clock_info(). Resolves the connection (if a piconet clock was
 * read) and completes the matching pending management command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn = NULL;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* A non-zero "which" means the piconet clock of a specific
	 * connection was read, so look up the matching hci_conn.
	 */
	if (hci_cp->which)
		conn = hci_conn_hash_lookup_handle(hdev,
						   __le16_to_cpu(hci_cp->handle));

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6339
/* Handler for MGMT_OP_GET_CLOCK_INFO. Always reads the local clock; when
 * a peer address is given, additionally reads the piconet clock of the
 * matching BR/EDR connection. Completes asynchronously through
 * get_clock_info_complete() / clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects the piconet clock of that connection */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* which = 0x00 reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References are released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6415
is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)6416 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6417 {
6418 struct hci_conn *conn;
6419
6420 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6421 if (!conn)
6422 return false;
6423
6424 if (conn->dst_type != type)
6425 return false;
6426
6427 if (conn->state != BT_CONNECTED)
6428 return false;
6429
6430 return true;
6431 }
6432
6433 /* This function requires the caller holds hdev->lock */
/* Create (or reuse) the connection parameters entry for the given address
 * and update its auto-connect policy, moving it onto the matching pending
 * connections or reports list. Returns 0 on success, -EIO if the entry
 * could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on
	 * before re-adding it below according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect attempt when not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6477
/* Send the MGMT_EV_DEVICE_ADDED event for the given address/action. */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added evt;

	evt.action = action;
	evt.addr.type = type;
	bacpy(&evt.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &evt, sizeof(evt), sk);
}
6489
/* Add Device command handler (MGMT_OP_ADD_DEVICE).
 *
 * BR/EDR addresses are added to the accept list (only action 0x01 is
 * supported there). LE identity addresses get a connection parameter
 * entry whose policy is mapped from cp->action:
 *   0x02 -> HCI_AUTO_CONN_ALWAYS
 *   0x01 -> HCI_AUTO_CONN_DIRECT
 *   else -> HCI_AUTO_CONN_REPORT
 * On success a Device Added event and a Device Flags Changed event are
 * emitted before the command completes.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject invalid address types and the wildcard address. */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the flags of the (possibly fresh) entry so the
		 * Device Flags Changed event below reports actual state.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6587
/* Send the MGMT_EV_DEVICE_REMOVED event for the given address. */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed evt;

	evt.addr.type = type;
	bacpy(&evt.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &evt, sizeof(evt), sk);
}
6598
/* Remove Device command handler (MGMT_OP_REMOVE_DEVICE).
 *
 * With a specific address, removes either a BR/EDR accept list entry
 * or an LE connection parameter entry. With BDADDR_ANY (address type
 * must then be 0), removes all accept list entries and all removable
 * LE connection parameters in one go.
 *
 * Note: the original text was garbled by HTML-entity decoding
 * ("&para" -> U+00B6), turning "&params" into "¶ms"; restored here.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled and explicit-connect-only entries are not
		 * removable through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Entries with a pending explicit connect are kept,
			 * but demoted to the explicit-only policy.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6727
/* Load Connection Parameters command handler (MGMT_OP_LOAD_CONN_PARAM).
 *
 * Replaces the stored LE connection parameters: disabled entries are
 * cleared first, then each valid parameter record from the command is
 * added (invalid address types or out-of-range values are skipped with
 * an error log rather than failing the whole command).
 *
 * Note: the original text was garbled by HTML-entity decoding
 * ("&para" -> U+00B6), turning "&param" into "¶m"; restored here.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared record count. */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6812
/* Set External Configuration command handler
 * (MGMT_OP_SET_EXTERNAL_CONFIG). Toggles the HCI_EXT_CONFIGURED flag;
 * only allowed while powered off and only on controllers with the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. When the change flips the overall
 * configured state, the management index is re-registered accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* "changed" is true only when the flag value actually flipped. */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the unconfigured flag no longer reflects the real configured
	 * state, move the index between the configured and unconfigured
	 * interfaces.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6868
/* Set Public Address command handler (MGMT_OP_SET_PUBLIC_ADDRESS).
 * Stores a new public address for controllers that provide a
 * set_bdaddr driver hook; only allowed while powered off and with a
 * non-wildcard address. If the address change makes the controller
 * configured, the index is moved to the configured interface and a
 * power-on is scheduled.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only act further if the stored address actually changed. */
	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6920
/* Completion callback for the HCI Read Local OOB (Ext) Data request
 * issued by read_local_ssp_oob_req(). Builds the EIR payload from the
 * returned hash/randomizer values, completes the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and, on success, emits a
 * Local OOB Data Updated event to other interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* HCI-level failure: reply with an empty EIR payload. */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy (P-192 only) variant of the command. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes class-of-dev TLV + two 18-byte TLVs
			 * (16-byte value + 2-byte header each).
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended variant carrying both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only: omit the P-192 values. */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7031
read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, struct mgmt_cp_read_local_oob_ext_data *cp)7032 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7033 struct mgmt_cp_read_local_oob_ext_data *cp)
7034 {
7035 struct mgmt_pending_cmd *cmd;
7036 struct hci_request req;
7037 int err;
7038
7039 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7040 cp, sizeof(*cp));
7041 if (!cmd)
7042 return -ENOMEM;
7043
7044 hci_req_init(&req, hdev);
7045
7046 if (bredr_sc_enabled(hdev))
7047 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7048 else
7049 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7050
7051 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7052 if (err < 0) {
7053 mgmt_pending_remove(cmd);
7054 return err;
7055 }
7056
7057 return 0;
7058 }
7059
/* Read Local OOB Extended Data command handler
 * (MGMT_OP_READ_LOCAL_OOB_EXT_DATA).
 *
 * For BR/EDR with SSP enabled, defers to read_local_ssp_oob_req()
 * which answers asynchronously from the HCI completion. Otherwise the
 * EIR payload is assembled inline: class of device for BR/EDR, or LE
 * address/role/SC confirm+random/flags for LE. Any error path replies
 * with an empty payload and the corresponding status.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine status and a worst-case eir_len so the
	 * response buffer can be sized before building the payload.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: build the actual EIR payload. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Answered asynchronously from the HCI completion
			 * handler; nothing more to do here on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] distinguishes address type: 0x01 random (static),
		 * 0x00 public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7215
get_supported_adv_flags(struct hci_dev *hdev)7216 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7217 {
7218 u32 flags = 0;
7219
7220 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7221 flags |= MGMT_ADV_FLAG_DISCOV;
7222 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7223 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7224 flags |= MGMT_ADV_FLAG_APPEARANCE;
7225 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7226
7227 /* In extended adv TX_POWER returned from Set Adv Param
7228 * will be always valid.
7229 */
7230 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7231 ext_adv_capable(hdev))
7232 flags |= MGMT_ADV_FLAG_TX_POWER;
7233
7234 if (ext_adv_capable(hdev)) {
7235 flags |= MGMT_ADV_FLAG_SEC_1M;
7236 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7237 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7238
7239 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7240 flags |= MGMT_ADV_FLAG_SEC_2M;
7241
7242 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7243 flags |= MGMT_ADV_FLAG_SEC_CODED;
7244 }
7245
7246 return flags;
7247 }
7248
/* Read Advertising Features command handler
 * (MGMT_OP_READ_ADV_FEATURES). Returns the supported advertising
 * flags, data-size limits, and the identifiers of all registered
 * advertising instances.
 *
 * Fix: the LL Privacy rejection previously replied with the
 * MGMT_OP_SET_ADVERTISING opcode; a command status must carry the
 * opcode of the command it answers, otherwise the client cannot match
 * the response.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance for its identifier. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7304
calculate_name_len(struct hci_dev *hdev)7305 static u8 calculate_name_len(struct hci_dev *hdev)
7306 {
7307 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7308
7309 return append_local_name(hdev, buf, 0);
7310 }
7311
tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags, bool is_adv_data)7312 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7313 bool is_adv_data)
7314 {
7315 u8 max_len = HCI_MAX_AD_LENGTH;
7316
7317 if (is_adv_data) {
7318 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7319 MGMT_ADV_FLAG_LIMITED_DISCOV |
7320 MGMT_ADV_FLAG_MANAGED_FLAGS))
7321 max_len -= 3;
7322
7323 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7324 max_len -= 3;
7325 } else {
7326 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7327 max_len -= calculate_name_len(hdev);
7328
7329 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7330 max_len -= 4;
7331 }
7332
7333 return max_len;
7334 }
7335
flags_managed(u32 adv_flags)7336 static bool flags_managed(u32 adv_flags)
7337 {
7338 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7339 MGMT_ADV_FLAG_LIMITED_DISCOV |
7340 MGMT_ADV_FLAG_MANAGED_FLAGS);
7341 }
7342
tx_power_managed(u32 adv_flags)7343 static bool tx_power_managed(u32 adv_flags)
7344 {
7345 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7346 }
7347
name_managed(u32 adv_flags)7348 static bool name_managed(u32 adv_flags)
7349 {
7350 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7351 }
7352
appearance_managed(u32 adv_flags)7353 static bool appearance_managed(u32 adv_flags)
7354 {
7355 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7356 }
7357
/* Validate user-supplied advertising / scan response TLV data: it must
 * fit in the space left by kernel-managed fields, be well-formed
 * length/type/value sequences, and not contain field types the kernel
 * manages itself (Flags, TX power, name, appearance) when the
 * corresponding adv_flags are set.
 *
 * Fix: the field-length bound is now checked before data[i + 1] is
 * read. Previously a trailing nonzero length byte (e.g. data =
 * {0x05} with len = 1) caused the type byte to be read one past the
 * end of the buffer before the same input was rejected; the set of
 * accepted/rejected inputs is unchanged.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. Checking this first also
		 * guarantees the type byte at data[i + 1] is in bounds
		 * before it is read below.
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
7402
/* HCI request completion callback for Add Advertising. Clears the
 * pending mark on instances that were applied successfully; on failure
 * removes any still-pending instances (emitting Advertising Removed
 * events) and then answers the pending MGMT_OP_ADD_ADVERTISING
 * command, if one exists.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		/* Failure: drop the pending instance, cancelling its
		 * timeout first if it is the one currently advertised.
		 */
		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7454
/* Add Advertising command handler (MGMT_OP_ADD_ADVERTISING).
 *
 * Validates the requested flags and TLV payloads, registers (or
 * replaces) the advertising instance, and — when the controller is
 * powered and not in legacy HCI_ADVERTISING mode — schedules the
 * instance via an HCI request whose completion answers the command;
 * otherwise the command completes immediately.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length part carries adv data followed by scan
	 * response data; the total must match exactly.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags ^ (phy_flags & -phy_flags) is nonzero when more than
	 * one secondary-PHY bit is set, which is not allowed.
	 */
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a powered controller to be enforced. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7607
/* Completion callback for the HCI request issued by remove_advertising().
 * Always answers the pending REMOVE_ADVERTISING command with success:
 * a failure status here only means disabling advertising failed, while
 * the instance itself has already been removed.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (cmd) {
		cp = cmd->param;
		rp.instance = cp->instance;

		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
7637
/* Handle the MGMT_OP_REMOVE_ADVERTISING command: remove one advertising
 * instance (or all of them when cp->instance is 0) and disable
 * advertising if no instance remains.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 *
	 * Note: the status must carry the opcode of the command being
	 * answered (MGMT_OP_REMOVE_ADVERTISING); previously it wrongly
	 * used MGMT_OP_SET_ADVERTISING, which made userspace match the
	 * response against the wrong pending command.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing instance. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another advertising/LE state change is in flight. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances exist at all. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* If we use extended advertising, instance is disabled and removed */
	if (ext_adv_capable(hdev)) {
		__hci_req_disable_ext_adv_instance(&req, cp->instance);
		__hci_req_remove_ext_adv_instance(&req, cp->instance);
	}

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7723
/* Handle MGMT_OP_GET_ADV_SIZE_INFO: report the maximum advertising and
 * scan response data lengths available for a given instance and flag
 * combination.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Without LE support there is no advertising to size. */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the
	 * specified flags; reject anything outside it.
	 */
	if (flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
7762
/* Dispatch table for incoming mgmt commands. The array index is the
 * mgmt opcode (entry 0 is a placeholder since opcode 0x0000 is
 * invalid), so the order of entries must not be changed. Each entry
 * lists the handler, the expected (minimum) parameter size and
 * optional HCI_MGMT_* flags: VAR_LEN for variable-length parameters,
 * NO_HDEV/HDEV_OPTIONAL for commands that (may) take no controller
 * index, UNTRUSTED for commands allowed on untrusted sockets and
 * UNCONFIGURED for commands valid on unconfigured controllers.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
};
7883
mgmt_index_added(struct hci_dev *hdev)7884 void mgmt_index_added(struct hci_dev *hdev)
7885 {
7886 struct mgmt_ev_ext_index ev;
7887
7888 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7889 return;
7890
7891 switch (hdev->dev_type) {
7892 case HCI_PRIMARY:
7893 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7894 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7895 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7896 ev.type = 0x01;
7897 } else {
7898 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7899 HCI_MGMT_INDEX_EVENTS);
7900 ev.type = 0x00;
7901 }
7902 break;
7903 case HCI_AMP:
7904 ev.type = 0x02;
7905 break;
7906 default:
7907 return;
7908 }
7909
7910 ev.bus = hdev->bus;
7911
7912 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7913 HCI_MGMT_EXT_INDEX_EVENTS);
7914 }
7915
mgmt_index_removed(struct hci_dev *hdev)7916 void mgmt_index_removed(struct hci_dev *hdev)
7917 {
7918 struct mgmt_ev_ext_index ev;
7919 u8 status = MGMT_STATUS_INVALID_INDEX;
7920
7921 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7922 return;
7923
7924 switch (hdev->dev_type) {
7925 case HCI_PRIMARY:
7926 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7927
7928 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7929 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7930 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7931 ev.type = 0x01;
7932 } else {
7933 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7934 HCI_MGMT_INDEX_EVENTS);
7935 ev.type = 0x00;
7936 }
7937 break;
7938 case HCI_AMP:
7939 ev.type = 0x02;
7940 break;
7941 default:
7942 return;
7943 }
7944
7945 ev.bus = hdev->bus;
7946
7947 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7948 HCI_MGMT_EXT_INDEX_EVENTS);
7949 }
7950
7951 /* This function requires the caller holds hdev->lock */
restart_le_actions(struct hci_dev *hdev)7952 static void restart_le_actions(struct hci_dev *hdev)
7953 {
7954 struct hci_conn_params *p;
7955
7956 list_for_each_entry(p, &hdev->le_conn_params, list) {
7957 /* Needed for AUTO_OFF case where might not "really"
7958 * have been powered off.
7959 */
7960 list_del_init(&p->action);
7961
7962 switch (p->auto_connect) {
7963 case HCI_AUTO_CONN_DIRECT:
7964 case HCI_AUTO_CONN_ALWAYS:
7965 list_add(&p->action, &hdev->pend_le_conns);
7966 break;
7967 case HCI_AUTO_CONN_REPORT:
7968 list_add(&p->action, &hdev->pend_le_reports);
7969 break;
7970 default:
7971 break;
7972 }
7973 }
7974 }
7975
/* Called when a power-on attempt has finished. On success the LE
 * auto-connect actions and background scanning are restored; in all
 * cases any pending SET_POWERED commands are completed and a New
 * Settings event is emitted.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		/* Re-arm LE actions dropped during the (apparent) off
		 * period and restart background scanning.
		 */
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp holds a reference on the last responded socket. */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7998
/* Finalize powering off: complete pending SET_POWERED commands, fail
 * all other pending commands, announce a cleared class of device if
 * one was set and emit New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class-of-device change if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* settings_rsp holds a reference on the last responded socket. */
	if (match.sk)
		sock_put(match.sk);
}
8032
mgmt_set_powered_failed(struct hci_dev *hdev, int err)8033 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8034 {
8035 struct mgmt_pending_cmd *cmd;
8036 u8 status;
8037
8038 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8039 if (!cmd)
8040 return;
8041
8042 if (err == -ERFKILL)
8043 status = MGMT_STATUS_RFKILLED;
8044 else
8045 status = MGMT_STATUS_FAILED;
8046
8047 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8048
8049 mgmt_pending_remove(cmd);
8050 }
8051
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * @persistent: hint to userspace whether the key should be stored.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero the whole event (including any padding) before filling. */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
8068
mgmt_ltk_type(struct smp_ltk *ltk)8069 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8070 {
8071 switch (ltk->type) {
8072 case SMP_LTK:
8073 case SMP_LTK_RESPONDER:
8074 if (ltk->authenticated)
8075 return MGMT_LTK_AUTHENTICATED;
8076 return MGMT_LTK_UNAUTHENTICATED;
8077 case SMP_LTK_P256:
8078 if (ltk->authenticated)
8079 return MGMT_LTK_P256_AUTH;
8080 return MGMT_LTK_P256_UNAUTH;
8081 case SMP_LTK_P256_DEBUG:
8082 return MGMT_LTK_P256_DEBUG;
8083 }
8084
8085 return MGMT_LTK_UNAUTHENTICATED;
8086 }
8087
/* Emit a New Long Term Key event. The store hint is suppressed for
 * non-identity random addresses since such keys cannot be looked up
 * again.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key created by the local initiator. */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8130
/* Emit a New Identity Resolving Key event, carrying both the RPA the
 * device was seen with and its identity address plus IRK value.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
8146
/* Emit a New Signature Resolving Key event. Like LTKs, the store hint
 * is suppressed for non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8176
/* Emit a New Connection Parameter event for an LE device. Only devices
 * with an identity address are reported, since parameters for changing
 * random addresses cannot be stored usefully.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* Parameters are little-endian on the wire. */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
8197
/* Emit a Device Connected event including EIR data: for LE the raw
 * advertising data as received, for BR/EDR the complete name and class
 * of device. Only the filled part of the local buffer is sent.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if one is set. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
8234
disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)8235 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8236 {
8237 struct sock **sk = data;
8238
8239 cmd->cmd_complete(cmd, 0);
8240
8241 *sk = cmd->sk;
8242 sock_hold(*sk);
8243
8244 mgmt_pending_remove(cmd);
8245 }
8246
/* Per-command callback: emit Device Unpaired for the command's target
 * address, then complete and remove the pending UNPAIR_DEVICE command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
8257
mgmt_powering_down(struct hci_dev *hdev)8258 bool mgmt_powering_down(struct hci_dev *hdev)
8259 {
8260 struct mgmt_pending_cmd *cmd;
8261 struct mgmt_mode *cp;
8262
8263 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8264 if (!cmd)
8265 return false;
8266
8267 cp = cmd->param;
8268 if (!cp->val)
8269 return true;
8270
8271 return false;
8272 }
8273
/* Emit a Device Disconnected event and complete any pending DISCONNECT
 * command for this device. If a power-down is in progress and this was
 * the last connection, the deferred power-off work is kicked
 * immediately.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that were announced to userspace. */
	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are reported to userspace. */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	/* disconnect_rsp took a reference on the responded socket. */
	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8313
/* Handle a failed HCI disconnect: flush pending UNPAIR_DEVICE commands
 * and complete a pending DISCONNECT command if it targets the same
 * address and address type.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only answer if the failure matches the command's target. */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8339
/* Emit a Connect Failed event. If a power-down is in progress and this
 * was the last tracked connection, the deferred power-off work is
 * kicked immediately.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
8359
/* Emit a PIN Code Request event for a BR/EDR device.
 * @secure: whether a 16 digit (secure) PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
8370
/* Complete a pending PIN_CODE_REPLY command with the mapped HCI
 * status; do nothing when no such command is pending.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8383
/* Complete a pending PIN_CODE_NEG_REPLY command with the mapped HCI
 * status; do nothing when no such command is pending.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8396
/* Emit a User Confirm Request event asking userspace to confirm a
 * numeric comparison value during pairing.
 * @confirm_hint: whether a simple yes/no confirmation is enough.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8413
/* Emit a User Passkey Request event asking userspace to supply a
 * passkey during pairing.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8427
/* Complete the pending user pairing response command identified by
 * @opcode with the mapped HCI status. Returns -ENOENT when no such
 * command is pending, 0 otherwise.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
8443
/* Complete a pending USER_CONFIRM_REPLY command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
8450
/* Complete a pending USER_CONFIRM_NEG_REPLY command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
8458
/* Complete a pending USER_PASSKEY_REPLY command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
8465
/* Complete a pending USER_PASSKEY_NEG_REPLY command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8473
/* Emit a Passkey Notify event so userspace can display the passkey.
 * @entered: number of digits the remote side has entered so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
8489
/* Report an authentication failure for @conn to userspace.
 *
 * The Auth Failed event is sent to all mgmt sockets except the one that
 * initiated the pairing (if any); that initiator instead gets its pending
 * command completed with the mapped status and the pending entry removed.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);
	u8 status = mgmt_status(hci_status);
	struct mgmt_ev_auth_failed ev = {
		.status = status,
	};

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* Skip the initiator's socket: it is answered via cmd_complete */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8510
/* Handle completion of an HCI authentication-enable change.
 *
 * On failure every pending Set Link Security command is answered with the
 * mapped error. On success the mgmt HCI_LINK_SECURITY flag is synced to
 * the controller's HCI_AUTH state, pending commands are answered with the
 * current settings, and a New Settings event is emitted if the flag
 * actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool auth_on;
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt flag; only one of the
	 * two flag operations runs, matching the controller's HCI_AUTH bit.
	 */
	auth_on = test_bit(HCI_AUTH, &hdev->flags);
	changed = auth_on ?
		!hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY) :
		hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8537
clear_eir(struct hci_request *req)8538 static void clear_eir(struct hci_request *req)
8539 {
8540 struct hci_dev *hdev = req->hdev;
8541 struct hci_cp_write_eir cp;
8542
8543 if (!lmp_ext_inq_capable(hdev))
8544 return;
8545
8546 memset(hdev->eir, 0, sizeof(hdev->eir));
8547
8548 memset(&cp, 0, sizeof(cp));
8549
8550 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8551 }
8552
/* Handle completion of an HCI Simple Secure Pairing mode change.
 *
 * On failure: if SSP was being enabled and the flag had already been set
 * optimistically, roll it (and the dependent High Speed flag) back and
 * announce the reverted settings; then fail all pending Set SSP commands
 * with the mapped error.
 *
 * On success: sync the HCI_SSP_ENABLED flag, answer pending Set SSP
 * commands with the current settings, emit New Settings if anything
 * changed, and queue an HCI request to update (or clear) the EIR data
 * to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Undo the optimistic enable; HS depends on SSP so it is
		 * cleared along with it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		/* Disabling SSP always clears HS as well; "changed" must be
		 * true if either flag actually flipped.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Bring the controller's EIR data in line with the new SSP state */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8605
sk_lookup(struct mgmt_pending_cmd *cmd, void *data)8606 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8607 {
8608 struct cmd_lookup *match = data;
8609
8610 if (match->sk == NULL) {
8611 match->sk = cmd->sk;
8612 sock_hold(match->sk);
8613 }
8614 }
8615
/* Handle completion of a Class of Device update.
 *
 * Finds the socket of whichever pending command triggered the update
 * (Set Dev Class, Add UUID or Remove UUID) and, on success, broadcasts
 * the new class to all other sockets subscribed to device-class events.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
	static const u16 ops[] = {
		MGMT_OP_SET_DEV_CLASS,
		MGMT_OP_ADD_UUID,
		MGMT_OP_REMOVE_UUID,
	};
	size_t i;

	/* Any of these pending commands may have caused the change */
	for (i = 0; i < ARRAY_SIZE(ops); i++)
		mgmt_pending_foreach(ops[i], hdev, sk_lookup, &match);

	if (!status) {
		/* Class of Device is always 3 bytes */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
8634
/* Handle completion of a local name change.
 *
 * Broadcasts a Local Name Changed event (skipping the initiating socket,
 * if any). When no userspace command is pending, the new name came from
 * the controller/power-on path: the cached copy is updated and, if this
 * is part of powering on, no events are emitted at all.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No userspace initiator: refresh the cached name */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	sk = cmd ? cmd->sk : NULL;
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, sk);
	ext_info_changed(hdev, sk);
}
8662
has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])8663 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8664 {
8665 int i;
8666
8667 for (i = 0; i < uuid_count; i++) {
8668 if (!memcmp(uuid, uuids[i], 16))
8669 return true;
8670 }
8671
8672 return false;
8673 }
8674
/* Check whether any UUID advertised in an EIR/advertising data blob
 * matches one of the 128-bit UUIDs in @uuids.
 *
 * The blob is a sequence of length-prefixed fields: eir[0] is the field
 * length (excluding the length byte itself) and eir[1] the field type.
 * 16- and 32-bit UUIDs are expanded to full 128-bit form by patching
 * them into a copy of the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop if the field would run past the end of the blob */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs, little-endian, 2 bytes each starting
			 * at eir[2]; they replace bytes 12-13 of the base
			 * UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs replace bytes 12-15 of the base UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8729
restart_le_scan(struct hci_dev *hdev)8730 static void restart_le_scan(struct hci_dev *hdev)
8731 {
8732 /* If controller is not scanning we are done. */
8733 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8734 return;
8735
8736 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8737 hdev->discovery.scan_start +
8738 hdev->discovery.scan_duration))
8739 return;
8740
8741 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
8742 DISCOV_LE_RESTART_DELAY);
8743 }
8744
is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)8745 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8746 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8747 {
8748 /* If a RSSI threshold has been specified, and
8749 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8750 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8751 * is set, let it through for further processing, as we might need to
8752 * restart the scan.
8753 *
8754 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8755 * the results are also dropped.
8756 */
8757 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8758 (rssi == HCI_RSSI_INVALID ||
8759 (rssi < hdev->discovery.rssi &&
8760 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8761 return false;
8762
8763 if (hdev->discovery.uuid_count != 0) {
8764 /* If a list of UUIDs is provided in filter, results with no
8765 * matching UUID should be dropped.
8766 */
8767 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8768 hdev->discovery.uuids) &&
8769 !eir_has_uuids(scan_rsp, scan_rsp_len,
8770 hdev->discovery.uuid_count,
8771 hdev->discovery.uuids))
8772 return false;
8773 }
8774
8775 /* If duplicate filtering does not report RSSI changes, then restart
8776 * scanning to ensure updated result with updated RSSI values.
8777 */
8778 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8779 restart_le_scan(hdev);
8780
8781 /* Validate RSSI value against the RSSI threshold once more. */
8782 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8783 rssi < hdev->discovery.rssi)
8784 return false;
8785 }
8786
8787 return true;
8788 }
8789
/* Report a discovered device to userspace via a Device Found event.
 *
 * Called from inquiry-result and LE advertising-report paths. The event
 * carries the EIR/advertising data, optionally a synthesized Class of
 * Device field, and any scan-response data appended after the EIR.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* Bit 5 of the second CoD byte is "limited
			 * discoverable mode" (BR/EDR).
			 */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* NOTE: this local intentionally shadows the u32
			 * "flags" parameter for the remainder of the branch.
			 */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Synthesize a CoD field when one was reported by the controller
	 * but is not already present in the EIR data.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8874
/* Report a resolved remote device name to userspace.
 *
 * Reuses the Device Found event format with a single EIR "Complete
 * Local Name" field carrying the name.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	/* Event header + 2-byte EIR field header + the name itself.
	 * NOTE(review): assumes callers pass name_len <= HCI_MAX_NAME_LENGTH
	 * — verify at the call sites.
	 */
	char buf[sizeof(struct mgmt_ev_device_found) + HCI_MAX_NAME_LENGTH + 2];
	struct mgmt_ev_device_found *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);
	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
8897
/* Broadcast a Discovering event reflecting the current discovery state. */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
8910
/* Broadcast a Controller Suspend event with the given suspend state. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
8918
/* Broadcast a Controller Resume event.
 *
 * @bdaddr identifies the device whose activity woke the controller;
 * when NULL (e.g. non-remote wake reason) the address field is zeroed.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	/* Start from a zeroed address, then fill in the waker if known */
	memset(&ev.addr, 0, sizeof(ev.addr));
	ev.wake_reason = reason;

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
8934
/* Registration record for the mgmt control channel: routes incoming
 * HCI_CHANNEL_CONTROL messages to the mgmt_handlers table and runs
 * mgmt_init_hdev() for each controller when first accessed.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8941
/* Register the mgmt control channel with the HCI socket layer.
 * Returns 0 on success or a negative errno from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8946
/* Unregister the mgmt control channel on module/subsystem teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8951